author		Mike Travis <travis@sgi.com>	2008-12-31 17:34:16 -0800
committer	Ingo Molnar <mingo@elte.hu>	2009-01-03 18:53:31 +0100
commit		7eb19553369c46cc1fa64caf120cbcab1b597f7c (patch)
tree		ef1a3beae706b9497c845d0a2557ceb4d2754998 /kernel
parent		6092848a2a23b660150a38bc06f59d75838d70c8 (diff)
parent		8c384cdee3e04d6194a2c2b192b624754f990835 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-cpumask into merge-rr-cpumask
Conflicts:
	arch/x86/kernel/io_apic.c
	kernel/rcuclassic.c
	kernel/sched.c
	kernel/time/tick-sched.c

Signed-off-by: Mike Travis <travis@sgi.com>
[ mingo@elte.hu: backmerged typo fix for io_apic.c ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Kconfig.preempt | 25
-rw-r--r--  kernel/Makefile | 8
-rw-r--r--  kernel/acct.c | 7
-rw-r--r--  kernel/auditsc.c | 255
-rw-r--r--  kernel/capability.c | 288
-rw-r--r--  kernel/cgroup.c | 25
-rw-r--r--  kernel/compat.c | 49
-rw-r--r--  kernel/cpu.c | 144
-rw-r--r--  kernel/cred-internals.h | 21
-rw-r--r--  kernel/cred.c | 588
-rw-r--r--  kernel/delayacct.c | 2
-rw-r--r--  kernel/exit.c | 25
-rw-r--r--  kernel/fork.c | 68
-rw-r--r--  kernel/futex.c | 81
-rw-r--r--  kernel/futex_compat.c | 7
-rw-r--r--  kernel/hrtimer.c | 331
-rw-r--r--  kernel/irq/chip.c | 1
-rw-r--r--  kernel/irq/handle.c | 5
-rw-r--r--  kernel/irq/manage.c | 38
-rw-r--r--  kernel/irq/numa_migrate.c | 11
-rw-r--r--  kernel/irq/proc.c | 32
-rw-r--r--  kernel/kallsyms.c | 16
-rw-r--r--  kernel/kexec.c | 2
-rw-r--r--  kernel/kmod.c | 30
-rw-r--r--  kernel/lockdep.c | 27
-rw-r--r--  kernel/nsproxy.c | 15
-rw-r--r--  kernel/panic.c | 32
-rw-r--r--  kernel/posix-timers.c | 46
-rw-r--r--  kernel/power/poweroff.c | 2
-rw-r--r--  kernel/printk.c | 2
-rw-r--r--  kernel/profile.c | 38
-rw-r--r--  kernel/ptrace.c | 41
-rw-r--r--  kernel/rcuclassic.c | 29
-rw-r--r--  kernel/rcupreempt.c | 29
-rw-r--r--  kernel/rcupreempt_trace.c | 10
-rw-r--r--  kernel/rcutorture.c | 93
-rw-r--r--  kernel/rcutree.c | 1535
-rw-r--r--  kernel/rcutree_trace.c | 271
-rw-r--r--  kernel/resource.c | 9
-rw-r--r--  kernel/sched.c | 115
-rw-r--r--  kernel/sched_fair.c | 9
-rw-r--r--  kernel/sched_rt.c | 2
-rw-r--r--  kernel/sched_stats.h | 5
-rw-r--r--  kernel/signal.c | 60
-rw-r--r--  kernel/smp.c | 145
-rw-r--r--  kernel/softirq.c | 21
-rw-r--r--  kernel/softlockup.c | 10
-rw-r--r--  kernel/stacktrace.c | 11
-rw-r--r--  kernel/stop_machine.c | 8
-rw-r--r--  kernel/sys.c | 586
-rw-r--r--  kernel/sysctl.c | 26
-rw-r--r--  kernel/sysctl_check.c | 1
-rw-r--r--  kernel/taskstats.c | 39
-rw-r--r--  kernel/time/clocksource.c | 9
-rw-r--r--  kernel/time/ntp.c | 4
-rw-r--r--  kernel/time/tick-broadcast.c | 115
-rw-r--r--  kernel/time/tick-common.c | 6
-rw-r--r--  kernel/time/tick-sched.c | 44
-rw-r--r--  kernel/timer.c | 8
-rw-r--r--  kernel/trace/Kconfig | 17
-rw-r--r--  kernel/trace/Makefile | 2
-rw-r--r--  kernel/trace/ftrace.c | 15
-rw-r--r--  kernel/trace/ring_buffer.c | 103
-rw-r--r--  kernel/trace/trace.c | 156
-rw-r--r--  kernel/trace/trace.h | 20
-rw-r--r--  kernel/trace/trace_boot.c | 14
-rw-r--r--  kernel/trace/trace_bts.c | 276
-rw-r--r--  kernel/trace/trace_functions.c | 14
-rw-r--r--  kernel/trace/trace_functions_graph.c | 68
-rw-r--r--  kernel/trace/trace_hw_branches.c | 195
-rw-r--r--  kernel/trace/trace_mmiotrace.c | 6
-rw-r--r--  kernel/trace/trace_power.c | 2
-rw-r--r--  kernel/trace/trace_sched_switch.c | 17
-rw-r--r--  kernel/trace/trace_sched_wakeup.c | 2
-rw-r--r--  kernel/trace/trace_stack.c | 49
-rw-r--r--  kernel/trace/trace_sysprof.c | 26
-rw-r--r--  kernel/tsacct.c | 6
-rw-r--r--  kernel/uid16.c | 31
-rw-r--r--  kernel/user.c | 96
-rw-r--r--  kernel/user_namespace.c | 65
-rw-r--r--  kernel/workqueue.c | 34
81 files changed, 4636 insertions, 2040 deletions
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 9fdba03dc1fc..bf987b95b356 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -52,28 +52,3 @@ config PREEMPT
endchoice
-config PREEMPT_RCU
- bool "Preemptible RCU"
- depends on PREEMPT
- default n
- help
- This option reduces the latency of the kernel by making certain
- RCU sections preemptible. Normally RCU code is non-preemptible, if
- this option is selected then read-only RCU sections become
- preemptible. This helps latency, but may expose bugs due to
- now-naive assumptions about each RCU read-side critical section
- remaining on a given CPU through its execution.
-
- Say N if you are unsure.
-
-config RCU_TRACE
- bool "Enable tracing for RCU - currently stats in debugfs"
- depends on PREEMPT_RCU
- select DEBUG_FS
- default y
- help
- This option provides tracing in RCU which presents stats
- in debugfs for debugging RCU implementation.
-
- Say Y here if you want to enable RCU tracing
- Say N if you are unsure.
diff --git a/kernel/Makefile b/kernel/Makefile
index 6a212b842d86..e1c5bf3365c0 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -9,7 +9,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
rcupdate.o extable.o params.o posix-timers.o \
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
- notifier.o ksysfs.o pm_qos_params.o sched_clock.o
+ notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o
ifdef CONFIG_FUNCTION_TRACER
# Do not trace debug files and internal ftrace files
@@ -73,10 +73,10 @@ obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
obj-$(CONFIG_SECCOMP) += seccomp.o
obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
obj-$(CONFIG_CLASSIC_RCU) += rcuclassic.o
+obj-$(CONFIG_TREE_RCU) += rcutree.o
obj-$(CONFIG_PREEMPT_RCU) += rcupreempt.o
-ifeq ($(CONFIG_PREEMPT_RCU),y)
-obj-$(CONFIG_RCU_TRACE) += rcupreempt_trace.o
-endif
+obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
+obj-$(CONFIG_PREEMPT_RCU_TRACE) += rcupreempt_trace.o
obj-$(CONFIG_RELAY) += relay.o
obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
diff --git a/kernel/acct.c b/kernel/acct.c
index f6006a60df5d..d57b7cbb98b6 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -530,15 +530,14 @@ static void do_acct_process(struct bsd_acct_struct *acct,
do_div(elapsed, AHZ);
ac.ac_btime = get_seconds() - elapsed;
/* we really need to bite the bullet and change layout */
- ac.ac_uid = current->uid;
- ac.ac_gid = current->gid;
+ current_uid_gid(&ac.ac_uid, &ac.ac_gid);
#if ACCT_VERSION==2
ac.ac_ahz = AHZ;
#endif
#if ACCT_VERSION==1 || ACCT_VERSION==2
/* backward-compatible 16 bit fields */
- ac.ac_uid16 = current->uid;
- ac.ac_gid16 = current->gid;
+ ac.ac_uid16 = ac.ac_uid;
+ ac.ac_gid16 = ac.ac_gid;
#endif
#if ACCT_VERSION==3
ac.ac_pid = task_tgid_nr_ns(current, ns);
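Not part of the patch, but for orientation: the acct.c hunk above is typical of the conversion this merge makes from direct current->uid/current->gid reads to the cred-based accessors. A minimal sketch of that accessor style (the function name example_report_ids is invented for illustration):

#include <linux/cred.h>
#include <linux/kernel.h>

static void example_report_ids(void)
{
	uid_t uid;
	gid_t gid;

	current_uid_gid(&uid, &gid);		/* real uid/gid of current, in one call */
	printk(KERN_DEBUG "uid=%u gid=%u fsuid=%u\n",
	       uid, gid, current_fsuid());	/* filesystem uid via its own helper */
}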
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 2a3f0afc4d2a..4819f3711973 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -65,6 +65,7 @@
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/inotify.h>
+#include <linux/capability.h>
#include "audit.h"
@@ -84,6 +85,15 @@ int audit_n_rules;
/* determines whether we collect data for signals sent */
int audit_signals;
+struct audit_cap_data {
+ kernel_cap_t permitted;
+ kernel_cap_t inheritable;
+ union {
+ unsigned int fE; /* effective bit of a file capability */
+ kernel_cap_t effective; /* effective set of a process */
+ };
+};
+
/* When fs/namei.c:getname() is called, we store the pointer in name and
* we don't let putname() free it (instead we free all of the saved
* pointers at syscall exit time).
@@ -100,6 +110,8 @@ struct audit_names {
gid_t gid;
dev_t rdev;
u32 osid;
+ struct audit_cap_data fcap;
+ unsigned int fcap_ver;
};
struct audit_aux_data {
@@ -184,6 +196,20 @@ struct audit_aux_data_pids {
int pid_count;
};
+struct audit_aux_data_bprm_fcaps {
+ struct audit_aux_data d;
+ struct audit_cap_data fcap;
+ unsigned int fcap_ver;
+ struct audit_cap_data old_pcap;
+ struct audit_cap_data new_pcap;
+};
+
+struct audit_aux_data_capset {
+ struct audit_aux_data d;
+ pid_t pid;
+ struct audit_cap_data cap;
+};
+
struct audit_tree_refs {
struct audit_tree_refs *next;
struct audit_chunk *c[31];
@@ -421,6 +447,7 @@ static int audit_filter_rules(struct task_struct *tsk,
struct audit_names *name,
enum audit_state *state)
{
+ const struct cred *cred = get_task_cred(tsk);
int i, j, need_sid = 1;
u32 sid;
@@ -440,28 +467,28 @@ static int audit_filter_rules(struct task_struct *tsk,
}
break;
case AUDIT_UID:
- result = audit_comparator(tsk->uid, f->op, f->val);
+ result = audit_comparator(cred->uid, f->op, f->val);
break;
case AUDIT_EUID:
- result = audit_comparator(tsk->euid, f->op, f->val);
+ result = audit_comparator(cred->euid, f->op, f->val);
break;
case AUDIT_SUID:
- result = audit_comparator(tsk->suid, f->op, f->val);
+ result = audit_comparator(cred->suid, f->op, f->val);
break;
case AUDIT_FSUID:
- result = audit_comparator(tsk->fsuid, f->op, f->val);
+ result = audit_comparator(cred->fsuid, f->op, f->val);
break;
case AUDIT_GID:
- result = audit_comparator(tsk->gid, f->op, f->val);
+ result = audit_comparator(cred->gid, f->op, f->val);
break;
case AUDIT_EGID:
- result = audit_comparator(tsk->egid, f->op, f->val);
+ result = audit_comparator(cred->egid, f->op, f->val);
break;
case AUDIT_SGID:
- result = audit_comparator(tsk->sgid, f->op, f->val);
+ result = audit_comparator(cred->sgid, f->op, f->val);
break;
case AUDIT_FSGID:
- result = audit_comparator(tsk->fsgid, f->op, f->val);
+ result = audit_comparator(cred->fsgid, f->op, f->val);
break;
case AUDIT_PERS:
result = audit_comparator(tsk->personality, f->op, f->val);
@@ -615,8 +642,10 @@ static int audit_filter_rules(struct task_struct *tsk,
break;
}
- if (!result)
+ if (!result) {
+ put_cred(cred);
return 0;
+ }
}
if (rule->filterkey && ctx)
ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC);
@@ -624,6 +653,7 @@ static int audit_filter_rules(struct task_struct *tsk,
case AUDIT_NEVER: *state = AUDIT_DISABLED; break;
case AUDIT_ALWAYS: *state = AUDIT_RECORD_CONTEXT; break;
}
+ put_cred(cred);
return 1;
}
@@ -1171,8 +1201,38 @@ static void audit_log_execve_info(struct audit_context *context,
kfree(buf);
}
+static void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
+{
+ int i;
+
+ audit_log_format(ab, " %s=", prefix);
+ CAP_FOR_EACH_U32(i) {
+ audit_log_format(ab, "%08x", cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
+ }
+}
+
+static void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name)
+{
+ kernel_cap_t *perm = &name->fcap.permitted;
+ kernel_cap_t *inh = &name->fcap.inheritable;
+ int log = 0;
+
+ if (!cap_isclear(*perm)) {
+ audit_log_cap(ab, "cap_fp", perm);
+ log = 1;
+ }
+ if (!cap_isclear(*inh)) {
+ audit_log_cap(ab, "cap_fi", inh);
+ log = 1;
+ }
+
+ if (log)
+ audit_log_format(ab, " cap_fe=%d cap_fver=%x", name->fcap.fE, name->fcap_ver);
+}
+
static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
{
+ const struct cred *cred;
int i, call_panic = 0;
struct audit_buffer *ab;
struct audit_aux_data *aux;
@@ -1182,14 +1242,15 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
context->pid = tsk->pid;
if (!context->ppid)
context->ppid = sys_getppid();
- context->uid = tsk->uid;
- context->gid = tsk->gid;
- context->euid = tsk->euid;
- context->suid = tsk->suid;
- context->fsuid = tsk->fsuid;
- context->egid = tsk->egid;
- context->sgid = tsk->sgid;
- context->fsgid = tsk->fsgid;
+ cred = current_cred();
+ context->uid = cred->uid;
+ context->gid = cred->gid;
+ context->euid = cred->euid;
+ context->suid = cred->suid;
+ context->fsuid = cred->fsuid;
+ context->egid = cred->egid;
+ context->sgid = cred->sgid;
+ context->fsgid = cred->fsgid;
context->personality = tsk->personality;
ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL);
@@ -1334,6 +1395,28 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
audit_log_format(ab, "fd0=%d fd1=%d", axs->fd[0], axs->fd[1]);
break; }
+ case AUDIT_BPRM_FCAPS: {
+ struct audit_aux_data_bprm_fcaps *axs = (void *)aux;
+ audit_log_format(ab, "fver=%x", axs->fcap_ver);
+ audit_log_cap(ab, "fp", &axs->fcap.permitted);
+ audit_log_cap(ab, "fi", &axs->fcap.inheritable);
+ audit_log_format(ab, " fe=%d", axs->fcap.fE);
+ audit_log_cap(ab, "old_pp", &axs->old_pcap.permitted);
+ audit_log_cap(ab, "old_pi", &axs->old_pcap.inheritable);
+ audit_log_cap(ab, "old_pe", &axs->old_pcap.effective);
+ audit_log_cap(ab, "new_pp", &axs->new_pcap.permitted);
+ audit_log_cap(ab, "new_pi", &axs->new_pcap.inheritable);
+ audit_log_cap(ab, "new_pe", &axs->new_pcap.effective);
+ break; }
+
+ case AUDIT_CAPSET: {
+ struct audit_aux_data_capset *axs = (void *)aux;
+ audit_log_format(ab, "pid=%d", axs->pid);
+ audit_log_cap(ab, "cap_pi", &axs->cap.inheritable);
+ audit_log_cap(ab, "cap_pp", &axs->cap.permitted);
+ audit_log_cap(ab, "cap_pe", &axs->cap.effective);
+ break; }
+
}
audit_log_end(ab);
}
@@ -1421,6 +1504,8 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
}
}
+ audit_log_fcaps(ab, n);
+
audit_log_end(ab);
}
@@ -1802,8 +1887,36 @@ static int audit_inc_name_count(struct audit_context *context,
return 0;
}
+
+static inline int audit_copy_fcaps(struct audit_names *name, const struct dentry *dentry)
+{
+ struct cpu_vfs_cap_data caps;
+ int rc;
+
+ memset(&name->fcap.permitted, 0, sizeof(kernel_cap_t));
+ memset(&name->fcap.inheritable, 0, sizeof(kernel_cap_t));
+ name->fcap.fE = 0;
+ name->fcap_ver = 0;
+
+ if (!dentry)
+ return 0;
+
+ rc = get_vfs_caps_from_disk(dentry, &caps);
+ if (rc)
+ return rc;
+
+ name->fcap.permitted = caps.permitted;
+ name->fcap.inheritable = caps.inheritable;
+ name->fcap.fE = !!(caps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE);
+ name->fcap_ver = (caps.magic_etc & VFS_CAP_REVISION_MASK) >> VFS_CAP_REVISION_SHIFT;
+
+ return 0;
+}
+
+
/* Copy inode data into an audit_names. */
-static void audit_copy_inode(struct audit_names *name, const struct inode *inode)
+static void audit_copy_inode(struct audit_names *name, const struct dentry *dentry,
+ const struct inode *inode)
{
name->ino = inode->i_ino;
name->dev = inode->i_sb->s_dev;
@@ -1812,6 +1925,7 @@ static void audit_copy_inode(struct audit_names *name, const struct inode *inode
name->gid = inode->i_gid;
name->rdev = inode->i_rdev;
security_inode_getsecid(inode, &name->osid);
+ audit_copy_fcaps(name, dentry);
}
/**
@@ -1846,7 +1960,7 @@ void __audit_inode(const char *name, const struct dentry *dentry)
context->names[idx].name = NULL;
}
handle_path(dentry);
- audit_copy_inode(&context->names[idx], inode);
+ audit_copy_inode(&context->names[idx], dentry, inode);
}
/**
@@ -1907,7 +2021,7 @@ void __audit_inode_child(const char *dname, const struct dentry *dentry,
if (!strcmp(dname, n->name) ||
!audit_compare_dname_path(dname, n->name, &dirlen)) {
if (inode)
- audit_copy_inode(n, inode);
+ audit_copy_inode(n, NULL, inode);
else
n->ino = (unsigned long)-1;
found_child = n->name;
@@ -1921,7 +2035,7 @@ add_names:
return;
idx = context->name_count - 1;
context->names[idx].name = NULL;
- audit_copy_inode(&context->names[idx], parent);
+ audit_copy_inode(&context->names[idx], NULL, parent);
}
if (!found_child) {
@@ -1942,7 +2056,7 @@ add_names:
}
if (inode)
- audit_copy_inode(&context->names[idx], inode);
+ audit_copy_inode(&context->names[idx], NULL, inode);
else
context->names[idx].ino = (unsigned long)-1;
}
@@ -1996,7 +2110,7 @@ int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
audit_log_format(ab, "login pid=%d uid=%u "
"old auid=%u new auid=%u"
" old ses=%u new ses=%u",
- task->pid, task->uid,
+ task->pid, task_uid(task),
task->loginuid, loginuid,
task->sessionid, sessionid);
audit_log_end(ab);
@@ -2379,7 +2493,7 @@ void __audit_ptrace(struct task_struct *t)
context->target_pid = t->pid;
context->target_auid = audit_get_loginuid(t);
- context->target_uid = t->uid;
+ context->target_uid = task_uid(t);
context->target_sessionid = audit_get_sessionid(t);
security_task_getsecid(t, &context->target_sid);
memcpy(context->target_comm, t->comm, TASK_COMM_LEN);
@@ -2398,6 +2512,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
struct audit_aux_data_pids *axp;
struct task_struct *tsk = current;
struct audit_context *ctx = tsk->audit_context;
+ uid_t uid = current_uid(), t_uid = task_uid(t);
if (audit_pid && t->tgid == audit_pid) {
if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
@@ -2405,7 +2520,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
if (tsk->loginuid != -1)
audit_sig_uid = tsk->loginuid;
else
- audit_sig_uid = tsk->uid;
+ audit_sig_uid = uid;
security_task_getsecid(tsk, &audit_sig_sid);
}
if (!audit_signals || audit_dummy_context())
@@ -2417,7 +2532,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
if (!ctx->target_pid) {
ctx->target_pid = t->tgid;
ctx->target_auid = audit_get_loginuid(t);
- ctx->target_uid = t->uid;
+ ctx->target_uid = t_uid;
ctx->target_sessionid = audit_get_sessionid(t);
security_task_getsecid(t, &ctx->target_sid);
memcpy(ctx->target_comm, t->comm, TASK_COMM_LEN);
@@ -2438,7 +2553,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
axp->target_pid[axp->pid_count] = t->tgid;
axp->target_auid[axp->pid_count] = audit_get_loginuid(t);
- axp->target_uid[axp->pid_count] = t->uid;
+ axp->target_uid[axp->pid_count] = t_uid;
axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t);
security_task_getsecid(t, &axp->target_sid[axp->pid_count]);
memcpy(axp->target_comm[axp->pid_count], t->comm, TASK_COMM_LEN);
@@ -2448,6 +2563,86 @@ int __audit_signal_info(int sig, struct task_struct *t)
}
/**
+ * __audit_log_bprm_fcaps - store information about a loading bprm and relevant fcaps
+ * @bprm: pointer to the bprm being processed
+ * @new: the proposed new credentials
+ * @old: the old credentials
+ *
+ * Simply check if the proc already has the caps given by the file and if not
+ * store the priv escalation info for later auditing at the end of the syscall
+ *
+ * -Eric
+ */
+int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
+ const struct cred *new, const struct cred *old)
+{
+ struct audit_aux_data_bprm_fcaps *ax;
+ struct audit_context *context = current->audit_context;
+ struct cpu_vfs_cap_data vcaps;
+ struct dentry *dentry;
+
+ ax = kmalloc(sizeof(*ax), GFP_KERNEL);
+ if (!ax)
+ return -ENOMEM;
+
+ ax->d.type = AUDIT_BPRM_FCAPS;
+ ax->d.next = context->aux;
+ context->aux = (void *)ax;
+
+ dentry = dget(bprm->file->f_dentry);
+ get_vfs_caps_from_disk(dentry, &vcaps);
+ dput(dentry);
+
+ ax->fcap.permitted = vcaps.permitted;
+ ax->fcap.inheritable = vcaps.inheritable;
+ ax->fcap.fE = !!(vcaps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE);
+ ax->fcap_ver = (vcaps.magic_etc & VFS_CAP_REVISION_MASK) >> VFS_CAP_REVISION_SHIFT;
+
+ ax->old_pcap.permitted = old->cap_permitted;
+ ax->old_pcap.inheritable = old->cap_inheritable;
+ ax->old_pcap.effective = old->cap_effective;
+
+ ax->new_pcap.permitted = new->cap_permitted;
+ ax->new_pcap.inheritable = new->cap_inheritable;
+ ax->new_pcap.effective = new->cap_effective;
+ return 0;
+}
+
+/**
+ * __audit_log_capset - store information about the arguments to the capset syscall
+ * @pid: target pid of the capset call
+ * @new: the new credentials
+ * @old: the old (current) credentials
+ *
+ * Record the arguments userspace sent to sys_capset for later printing by the
+ * audit system if applicable
+ */
+int __audit_log_capset(pid_t pid,
+ const struct cred *new, const struct cred *old)
+{
+ struct audit_aux_data_capset *ax;
+ struct audit_context *context = current->audit_context;
+
+ if (likely(!audit_enabled || !context || context->dummy))
+ return 0;
+
+ ax = kmalloc(sizeof(*ax), GFP_KERNEL);
+ if (!ax)
+ return -ENOMEM;
+
+ ax->d.type = AUDIT_CAPSET;
+ ax->d.next = context->aux;
+ context->aux = (void *)ax;
+
+ ax->pid = pid;
+ ax->cap.effective = new->cap_effective;
+ ax->cap.inheritable = new->cap_effective;
+ ax->cap.permitted = new->cap_permitted;
+
+ return 0;
+}
+
+/**
* audit_core_dumps - record information about processes that end abnormally
* @signr: signal value
*
@@ -2458,7 +2653,8 @@ void audit_core_dumps(long signr)
{
struct audit_buffer *ab;
u32 sid;
- uid_t auid = audit_get_loginuid(current);
+ uid_t auid = audit_get_loginuid(current), uid;
+ gid_t gid;
unsigned int sessionid = audit_get_sessionid(current);
if (!audit_enabled)
@@ -2468,8 +2664,9 @@ void audit_core_dumps(long signr)
return;
ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
+ current_uid_gid(&uid, &gid);
audit_log_format(ab, "auid=%u uid=%u gid=%u ses=%u",
- auid, current->uid, current->gid, sessionid);
+ auid, uid, gid, sessionid);
security_task_getsecid(current, &sid);
if (sid) {
char *ctx = NULL;
diff --git a/kernel/capability.c b/kernel/capability.c
index 33e51e78c2d8..36b4b4daebec 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -7,6 +7,7 @@
* 30 May 2002: Cleanup, Robert M. Love <rml@tech9.net>
*/
+#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/module.h>
@@ -14,12 +15,7 @@
#include <linux/syscalls.h>
#include <linux/pid_namespace.h>
#include <asm/uaccess.h>
-
-/*
- * This lock protects task->cap_* for all tasks including current.
- * Locking rule: acquire this prior to tasklist_lock.
- */
-static DEFINE_SPINLOCK(task_capability_lock);
+#include "cred-internals.h"
/*
* Leveraged for setting/resetting capabilities
@@ -33,6 +29,17 @@ EXPORT_SYMBOL(__cap_empty_set);
EXPORT_SYMBOL(__cap_full_set);
EXPORT_SYMBOL(__cap_init_eff_set);
+#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
+int file_caps_enabled = 1;
+
+static int __init file_caps_disable(char *str)
+{
+ file_caps_enabled = 0;
+ return 1;
+}
+__setup("no_file_caps", file_caps_disable);
+#endif
+
/*
* More recent versions of libcap are available from:
*
@@ -115,167 +122,12 @@ static int cap_validate_magic(cap_user_header_t header, unsigned *tocopy)
return 0;
}
-#ifndef CONFIG_SECURITY_FILE_CAPABILITIES
-
-/*
- * Without filesystem capability support, we nominally support one process
- * setting the capabilities of another
- */
-static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
- kernel_cap_t *pIp, kernel_cap_t *pPp)
-{
- struct task_struct *target;
- int ret;
-
- spin_lock(&task_capability_lock);
- read_lock(&tasklist_lock);
-
- if (pid && pid != task_pid_vnr(current)) {
- target = find_task_by_vpid(pid);
- if (!target) {
- ret = -ESRCH;
- goto out;
- }
- } else
- target = current;
-
- ret = security_capget(target, pEp, pIp, pPp);
-
-out:
- read_unlock(&tasklist_lock);
- spin_unlock(&task_capability_lock);
-
- return ret;
-}
-
-/*
- * cap_set_pg - set capabilities for all processes in a given process
- * group. We call this holding task_capability_lock and tasklist_lock.
- */
-static inline int cap_set_pg(int pgrp_nr, kernel_cap_t *effective,
- kernel_cap_t *inheritable,
- kernel_cap_t *permitted)
-{
- struct task_struct *g, *target;
- int ret = -EPERM;
- int found = 0;
- struct pid *pgrp;
-
- spin_lock(&task_capability_lock);
- read_lock(&tasklist_lock);
-
- pgrp = find_vpid(pgrp_nr);
- do_each_pid_task(pgrp, PIDTYPE_PGID, g) {
- target = g;
- while_each_thread(g, target) {
- if (!security_capset_check(target, effective,
- inheritable, permitted)) {
- security_capset_set(target, effective,
- inheritable, permitted);
- ret = 0;
- }
- found = 1;
- }
- } while_each_pid_task(pgrp, PIDTYPE_PGID, g);
-
- read_unlock(&tasklist_lock);
- spin_unlock(&task_capability_lock);
-
- if (!found)
- ret = 0;
- return ret;
-}
-
-/*
- * cap_set_all - set capabilities for all processes other than init
- * and self. We call this holding task_capability_lock and tasklist_lock.
- */
-static inline int cap_set_all(kernel_cap_t *effective,
- kernel_cap_t *inheritable,
- kernel_cap_t *permitted)
-{
- struct task_struct *g, *target;
- int ret = -EPERM;
- int found = 0;
-
- spin_lock(&task_capability_lock);
- read_lock(&tasklist_lock);
-
- do_each_thread(g, target) {
- if (target == current
- || is_container_init(target->group_leader))
- continue;
- found = 1;
- if (security_capset_check(target, effective, inheritable,
- permitted))
- continue;
- ret = 0;
- security_capset_set(target, effective, inheritable, permitted);
- } while_each_thread(g, target);
-
- read_unlock(&tasklist_lock);
- spin_unlock(&task_capability_lock);
-
- if (!found)
- ret = 0;
-
- return ret;
-}
-
-/*
- * Given the target pid does not refer to the current process we
- * need more elaborate support... (This support is not present when
- * filesystem capabilities are configured.)
- */
-static inline int do_sys_capset_other_tasks(pid_t pid, kernel_cap_t *effective,
- kernel_cap_t *inheritable,
- kernel_cap_t *permitted)
-{
- struct task_struct *target;
- int ret;
-
- if (!capable(CAP_SETPCAP))
- return -EPERM;
-
- if (pid == -1) /* all procs other than current and init */
- return cap_set_all(effective, inheritable, permitted);
-
- else if (pid < 0) /* all procs in process group */
- return cap_set_pg(-pid, effective, inheritable, permitted);
-
- /* target != current */
- spin_lock(&task_capability_lock);
- read_lock(&tasklist_lock);
-
- target = find_task_by_vpid(pid);
- if (!target)
- ret = -ESRCH;
- else {
- ret = security_capset_check(target, effective, inheritable,
- permitted);
-
- /* having verified that the proposed changes are legal,
- we now put them into effect. */
- if (!ret)
- security_capset_set(target, effective, inheritable,
- permitted);
- }
-
- read_unlock(&tasklist_lock);
- spin_unlock(&task_capability_lock);
-
- return ret;
-}
-
-#else /* ie., def CONFIG_SECURITY_FILE_CAPABILITIES */
-
/*
- * If we have configured with filesystem capability support, then the
- * only thing that can change the capabilities of the current process
- * is the current process. As such, we can't be in this code at the
- * same time as we are in the process of setting capabilities in this
- * process. The net result is that we can limit our use of locks to
- * when we are reading the caps of another process.
+ * The only thing that can change the capabilities of the current
+ * process is the current process. As such, we can't be in this code
+ * at the same time as we are in the process of setting capabilities
+ * in this process. The net result is that we can limit our use of
+ * locks to when we are reading the caps of another process.
*/
static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
kernel_cap_t *pIp, kernel_cap_t *pPp)
@@ -285,7 +137,6 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
if (pid && (pid != task_pid_vnr(current))) {
struct task_struct *target;
- spin_lock(&task_capability_lock);
read_lock(&tasklist_lock);
target = find_task_by_vpid(pid);
@@ -295,50 +146,12 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
ret = security_capget(target, pEp, pIp, pPp);
read_unlock(&tasklist_lock);
- spin_unlock(&task_capability_lock);
} else
ret = security_capget(current, pEp, pIp, pPp);
return ret;
}
-/*
- * With filesystem capability support configured, the kernel does not
- * permit the changing of capabilities in one process by another
- * process. (CAP_SETPCAP has much less broad semantics when configured
- * this way.)
- */
-static inline int do_sys_capset_other_tasks(pid_t pid,
- kernel_cap_t *effective,
- kernel_cap_t *inheritable,
- kernel_cap_t *permitted)
-{
- return -EPERM;
-}
-
-#endif /* ie., ndef CONFIG_SECURITY_FILE_CAPABILITIES */
-
-/*
- * Atomically modify the effective capabilities returning the original
- * value. No permission check is performed here - it is assumed that the
- * caller is permitted to set the desired effective capabilities.
- */
-kernel_cap_t cap_set_effective(const kernel_cap_t pE_new)
-{
- kernel_cap_t pE_old;
-
- spin_lock(&task_capability_lock);
-
- pE_old = current->cap_effective;
- current->cap_effective = pE_new;
-
- spin_unlock(&task_capability_lock);
-
- return pE_old;
-}
-
-EXPORT_SYMBOL(cap_set_effective);
-
/**
* sys_capget - get the capabilities of a given process.
* @header: pointer to struct that contains capability version and
@@ -366,7 +179,6 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
return -EINVAL;
ret = cap_get_target_pid(pid, &pE, &pI, &pP);
-
if (!ret) {
struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
unsigned i;
@@ -412,16 +224,14 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
* @data: pointer to struct that contains the effective, permitted,
* and inheritable capabilities
*
- * Set capabilities for a given process, all processes, or all
- * processes in a given process group.
+ * Set capabilities for the current process only. The ability to set the
+ * capabilities of any other process(es) has been deprecated and removed.
*
* The restrictions on setting capabilities are specified as:
*
- * [pid is for the 'target' task. 'current' is the calling task.]
- *
- * I: any raised capabilities must be a subset of the (old current) permitted
- * P: any raised capabilities must be a subset of the (old current) permitted
- * E: must be set to a subset of (new target) permitted
+ * I: any raised capabilities must be a subset of the old permitted
+ * P: any raised capabilities must be a subset of the old permitted
+ * E: must be set to a subset of new permitted
*
* Returns 0 on success and < 0 on error.
*/
@@ -430,6 +240,7 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
unsigned i, tocopy;
kernel_cap_t inheritable, permitted, effective;
+ struct cred *new;
int ret;
pid_t pid;
@@ -440,10 +251,13 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
if (get_user(pid, &header->pid))
return -EFAULT;
- if (copy_from_user(&kdata, data, tocopy
- * sizeof(struct __user_cap_data_struct))) {
+ /* may only affect current now */
+ if (pid != 0 && pid != task_pid_vnr(current))
+ return -EPERM;
+
+ if (copy_from_user(&kdata, data,
+ tocopy * sizeof(struct __user_cap_data_struct)))
return -EFAULT;
- }
for (i = 0; i < tocopy; i++) {
effective.cap[i] = kdata[i].effective;
@@ -457,32 +271,23 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
i++;
}
- if (pid && (pid != task_pid_vnr(current)))
- ret = do_sys_capset_other_tasks(pid, &effective, &inheritable,
- &permitted);
- else {
- /*
- * This lock is required even when filesystem
- * capability support is configured - it protects the
- * sys_capget() call from returning incorrect data in
- * the case that the targeted process is not the
- * current one.
- */
- spin_lock(&task_capability_lock);
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
- ret = security_capset_check(current, &effective, &inheritable,
- &permitted);
- /*
- * Having verified that the proposed changes are
- * legal, we now put them into effect.
- */
- if (!ret)
- security_capset_set(current, &effective, &inheritable,
- &permitted);
- spin_unlock(&task_capability_lock);
- }
+ ret = security_capset(new, current_cred(),
+ &effective, &inheritable, &permitted);
+ if (ret < 0)
+ goto error;
+
+ ret = audit_log_capset(pid, new, current_cred());
+ if (ret < 0)
+ return ret;
+ return commit_creds(new);
+error:
+ abort_creds(new);
return ret;
}
@@ -498,6 +303,11 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
*/
int capable(int cap)
{
+ if (unlikely(!cap_valid(cap))) {
+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
+ BUG();
+ }
+
if (has_capability(current, cap)) {
current->flags |= PF_SUPERPRIV;
return 1;
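Not part of the patch: a userspace-side sketch of what the new sys_capset() semantics above mean in practice — the call may now only target the calling process (header pid 0 or the caller's own pid); any other pid fails with EPERM. This assumes the v3 capability ABI from <linux/capability.h>, uses raw syscalls, and the program itself is invented for illustration:

#include <linux/capability.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct __user_cap_header_struct hdr;
	struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3];

	memset(&hdr, 0, sizeof(hdr));
	memset(data, 0, sizeof(data));
	hdr.version = _LINUX_CAPABILITY_VERSION_3;
	hdr.pid = 0;				/* 0 == the calling process */

	if (syscall(SYS_capget, &hdr, data) < 0) {	/* read current caps first */
		perror("capget");
		return 1;
	}

	data[0].effective = 0;			/* drop the effective set ... */
	data[1].effective = 0;			/* ... keep permitted/inheritable */

	if (syscall(SYS_capset, &hdr, data) < 0) {	/* any pid but our own now gets EPERM */
		perror("capset");
		return 1;
	}
	return 0;
}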
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 8185a0f09594..48348dde6d81 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -571,8 +571,8 @@ static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
if (inode) {
inode->i_mode = mode;
- inode->i_uid = current->fsuid;
- inode->i_gid = current->fsgid;
+ inode->i_uid = current_fsuid();
+ inode->i_gid = current_fsgid();
inode->i_blocks = 0;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info;
@@ -1024,7 +1024,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
if (ret == -EBUSY) {
mutex_unlock(&cgroup_mutex);
mutex_unlock(&inode->i_mutex);
- goto drop_new_super;
+ goto free_cg_links;
}
/* EBUSY should be the only error here */
@@ -1073,10 +1073,11 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
return simple_set_mnt(mnt, sb);
+ free_cg_links:
+ free_cg_links(&tmp_cg_links);
drop_new_super:
up_write(&sb->s_umount);
deactivate_super(sb);
- free_cg_links(&tmp_cg_links);
return ret;
}
@@ -1279,6 +1280,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
static int attach_task_by_pid(struct cgroup *cgrp, u64 pid)
{
struct task_struct *tsk;
+ const struct cred *cred = current_cred(), *tcred;
int ret;
if (pid) {
@@ -1288,14 +1290,16 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid)
rcu_read_unlock();
return -ESRCH;
}
- get_task_struct(tsk);
- rcu_read_unlock();
- if ((current->euid) && (current->euid != tsk->uid)
- && (current->euid != tsk->suid)) {
- put_task_struct(tsk);
+ tcred = __task_cred(tsk);
+ if (cred->euid &&
+ cred->euid != tcred->uid &&
+ cred->euid != tcred->suid) {
+ rcu_read_unlock();
return -EACCES;
}
+ get_task_struct(tsk);
+ rcu_read_unlock();
} else {
tsk = current;
get_task_struct(tsk);
@@ -2934,9 +2938,6 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
again:
root = subsys->root;
if (root == &rootnode) {
- printk(KERN_INFO
- "Not cloning cgroup for unused subsystem %s\n",
- subsys->name);
mutex_unlock(&cgroup_mutex);
return 0;
}
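Not part of the patch: the cgroup.c hunk above reads another task's credentials with __task_cred() under rcu_read_lock(). A minimal sketch of that pattern (example_task_euid is an invented name):

#include <linux/cred.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

static uid_t example_task_euid(struct task_struct *tsk)
{
	const struct cred *tcred;
	uid_t euid;

	rcu_read_lock();
	tcred = __task_cred(tsk);	/* only valid inside this RCU read section */
	euid = tcred->euid;
	rcu_read_unlock();

	return euid;
}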
diff --git a/kernel/compat.c b/kernel/compat.c
index 8eafe3eb50d9..d52e2ec1deb5 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -454,16 +454,16 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
}
static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr,
- unsigned len, cpumask_t *new_mask)
+ unsigned len, struct cpumask *new_mask)
{
unsigned long *k;
- if (len < sizeof(cpumask_t))
- memset(new_mask, 0, sizeof(cpumask_t));
- else if (len > sizeof(cpumask_t))
- len = sizeof(cpumask_t);
+ if (len < cpumask_size())
+ memset(new_mask, 0, cpumask_size());
+ else if (len > cpumask_size())
+ len = cpumask_size();
- k = cpus_addr(*new_mask);
+ k = cpumask_bits(new_mask);
return compat_get_bitmap(k, user_mask_ptr, len * 8);
}
@@ -471,40 +471,51 @@ asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
unsigned int len,
compat_ulong_t __user *user_mask_ptr)
{
- cpumask_t new_mask;
+ cpumask_var_t new_mask;
int retval;
- retval = compat_get_user_cpu_mask(user_mask_ptr, len, &new_mask);
+ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ retval = compat_get_user_cpu_mask(user_mask_ptr, len, new_mask);
if (retval)
- return retval;
+ goto out;
- return sched_setaffinity(pid, &new_mask);
+ retval = sched_setaffinity(pid, new_mask);
+out:
+ free_cpumask_var(new_mask);
+ return retval;
}
asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
compat_ulong_t __user *user_mask_ptr)
{
int ret;
- cpumask_t mask;
+ cpumask_var_t mask;
unsigned long *k;
- unsigned int min_length = sizeof(cpumask_t);
+ unsigned int min_length = cpumask_size();
- if (NR_CPUS <= BITS_PER_COMPAT_LONG)
+ if (nr_cpu_ids <= BITS_PER_COMPAT_LONG)
min_length = sizeof(compat_ulong_t);
if (len < min_length)
return -EINVAL;
- ret = sched_getaffinity(pid, &mask);
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ ret = sched_getaffinity(pid, mask);
if (ret < 0)
- return ret;
+ goto out;
- k = cpus_addr(mask);
+ k = cpumask_bits(mask);
ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8);
- if (ret)
- return ret;
+ if (ret == 0)
+ ret = min_length;
- return min_length;
+out:
+ free_cpumask_var(mask);
+ return ret;
}
int get_compat_itimerspec(struct itimerspec *dst,
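Not part of the patch: the compat.c hunks above show the recurring conversion from an on-stack cpumask_t to cpumask_var_t, so the stack footprint no longer scales with CONFIG_NR_CPUS. A minimal sketch of the allocate/use/free pattern (example_count_online_cpus is an invented name):

#include <linux/cpumask.h>
#include <linux/slab.h>

static int example_count_online_cpus(void)
{
	cpumask_var_t mask;
	int n;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))	/* heap-backed when CPUMASK_OFFSTACK=y */
		return -ENOMEM;

	cpumask_copy(mask, cpu_online_mask);		/* snapshot the online map */
	n = cpumask_weight(mask);			/* number of set bits */

	free_cpumask_var(mask);
	return n;
}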
diff --git a/kernel/cpu.c b/kernel/cpu.c
index bae131a1211b..47fff3b63cbf 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -15,30 +15,8 @@
#include <linux/stop_machine.h>
#include <linux/mutex.h>
-/*
- * Represents all cpu's present in the system
- * In systems capable of hotplug, this map could dynamically grow
- * as new cpu's are detected in the system via any platform specific
- * method, such as ACPI for e.g.
- */
-cpumask_t cpu_present_map __read_mostly;
-EXPORT_SYMBOL(cpu_present_map);
-
-/*
- * Represents all cpu's that are currently online.
- */
-cpumask_t cpu_online_map __read_mostly;
-EXPORT_SYMBOL(cpu_online_map);
-
-#ifdef CONFIG_INIT_ALL_POSSIBLE
-cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
-#else
-cpumask_t cpu_possible_map __read_mostly;
-#endif
-EXPORT_SYMBOL(cpu_possible_map);
-
#ifdef CONFIG_SMP
-/* Serializes the updates to cpu_online_map, cpu_present_map */
+/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
@@ -65,8 +43,6 @@ void __init cpu_hotplug_init(void)
cpu_hotplug.refcount = 0;
}
-cpumask_t cpu_active_map;
-
#ifdef CONFIG_HOTPLUG_CPU
void get_online_cpus(void)
@@ -97,7 +73,7 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
/*
* The following two API's must be used when attempting
- * to serialize the updates to cpu_online_map, cpu_present_map.
+ * to serialize the updates to cpu_online_mask, cpu_present_mask.
*/
void cpu_maps_update_begin(void)
{
@@ -218,7 +194,7 @@ static int __ref take_cpu_down(void *_param)
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
int err, nr_calls = 0;
- cpumask_t old_allowed, tmp;
+ cpumask_var_t old_allowed;
void *hcpu = (void *)(long)cpu;
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
struct take_cpu_down_param tcd_param = {
@@ -232,6 +208,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
if (!cpu_online(cpu))
return -EINVAL;
+ if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
+ return -ENOMEM;
+
cpu_hotplug_begin();
err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
hcpu, -1, &nr_calls);
@@ -246,13 +225,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
}
/* Ensure that we are not runnable on dying cpu */
- old_allowed = current->cpus_allowed;
- cpus_setall(tmp);
- cpu_clear(cpu, tmp);
- set_cpus_allowed_ptr(current, &tmp);
- tmp = cpumask_of_cpu(cpu);
+ cpumask_copy(old_allowed, &current->cpus_allowed);
+ set_cpus_allowed_ptr(current,
+ cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));
- err = __stop_machine(take_cpu_down, &tcd_param, &tmp);
+ err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
if (err) {
/* CPU didn't die: tell everyone. Can't complain. */
if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
@@ -278,7 +255,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
check_for_tasks(cpu);
out_allowed:
- set_cpus_allowed_ptr(current, &old_allowed);
+ set_cpus_allowed_ptr(current, old_allowed);
out_release:
cpu_hotplug_done();
if (!err) {
@@ -286,6 +263,7 @@ out_release:
hcpu) == NOTIFY_BAD)
BUG();
}
+ free_cpumask_var(old_allowed);
return err;
}
@@ -304,7 +282,7 @@ int __ref cpu_down(unsigned int cpu)
/*
* Make sure the all cpus did the reschedule and are not
- * using stale version of the cpu_active_map.
+ * using stale version of the cpu_active_mask.
* This is not strictly necessary because stop_machine()
* that we run down the line already provides the required
* synchronization. But it's really a side effect and we do not
@@ -368,7 +346,7 @@ out_notify:
int __cpuinit cpu_up(unsigned int cpu)
{
int err = 0;
- if (!cpu_isset(cpu, cpu_possible_map)) {
+ if (!cpu_possible(cpu)) {
printk(KERN_ERR "can't online cpu %d because it is not "
"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
@@ -393,25 +371,25 @@ out:
}
#ifdef CONFIG_PM_SLEEP_SMP
-static cpumask_t frozen_cpus;
+static cpumask_var_t frozen_cpus;
int disable_nonboot_cpus(void)
{
int cpu, first_cpu, error = 0;
cpu_maps_update_begin();
- first_cpu = first_cpu(cpu_online_map);
+ first_cpu = cpumask_first(cpu_online_mask);
/* We take down all of the non-boot CPUs in one shot to avoid races
* with the userspace trying to use the CPU hotplug at the same time
*/
- cpus_clear(frozen_cpus);
+ cpumask_clear(frozen_cpus);
printk("Disabling non-boot CPUs ...\n");
for_each_online_cpu(cpu) {
if (cpu == first_cpu)
continue;
error = _cpu_down(cpu, 1);
if (!error) {
- cpu_set(cpu, frozen_cpus);
+ cpumask_set_cpu(cpu, frozen_cpus);
printk("CPU%d is down\n", cpu);
} else {
printk(KERN_ERR "Error taking CPU%d down: %d\n",
@@ -437,11 +415,11 @@ void __ref enable_nonboot_cpus(void)
/* Allow everyone to use the CPU hotplug again */
cpu_maps_update_begin();
cpu_hotplug_disabled = 0;
- if (cpus_empty(frozen_cpus))
+ if (cpumask_empty(frozen_cpus))
goto out;
printk("Enabling non-boot CPUs ...\n");
- for_each_cpu_mask_nr(cpu, frozen_cpus) {
+ for_each_cpu(cpu, frozen_cpus) {
error = _cpu_up(cpu, 1);
if (!error) {
printk("CPU%d is up\n", cpu);
@@ -449,10 +427,18 @@ void __ref enable_nonboot_cpus(void)
}
printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
}
- cpus_clear(frozen_cpus);
+ cpumask_clear(frozen_cpus);
out:
cpu_maps_update_done();
}
+
+static int alloc_frozen_cpus(void)
+{
+ if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
+ return -ENOMEM;
+ return 0;
+}
+core_initcall(alloc_frozen_cpus);
#endif /* CONFIG_PM_SLEEP_SMP */
/**
@@ -468,7 +454,7 @@ void __cpuinit notify_cpu_starting(unsigned int cpu)
unsigned long val = CPU_STARTING;
#ifdef CONFIG_PM_SLEEP_SMP
- if (cpu_isset(cpu, frozen_cpus))
+ if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
@@ -480,7 +466,7 @@ void __cpuinit notify_cpu_starting(unsigned int cpu)
* cpu_bit_bitmap[] is a special, "compressed" data structure that
* represents all NR_CPUS bits binary values of 1<<nr.
*
- * It is used by cpumask_of_cpu() to get a constant address to a CPU
+ * It is used by cpumask_of() to get a constant address to a CPU
* mask value that has a single bit set only.
*/
@@ -503,3 +489,71 @@ EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);
+
+#ifdef CONFIG_INIT_ALL_POSSIBLE
+static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
+ = CPU_BITS_ALL;
+#else
+static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
+#endif
+const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
+EXPORT_SYMBOL(cpu_possible_mask);
+
+static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
+EXPORT_SYMBOL(cpu_online_mask);
+
+static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
+EXPORT_SYMBOL(cpu_present_mask);
+
+static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
+EXPORT_SYMBOL(cpu_active_mask);
+
+void set_cpu_possible(unsigned int cpu, bool possible)
+{
+ if (possible)
+ cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
+ else
+ cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
+}
+
+void set_cpu_present(unsigned int cpu, bool present)
+{
+ if (present)
+ cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
+ else
+ cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
+}
+
+void set_cpu_online(unsigned int cpu, bool online)
+{
+ if (online)
+ cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
+ else
+ cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
+}
+
+void set_cpu_active(unsigned int cpu, bool active)
+{
+ if (active)
+ cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
+ else
+ cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
+}
+
+void init_cpu_present(const struct cpumask *src)
+{
+ cpumask_copy(to_cpumask(cpu_present_bits), src);
+}
+
+void init_cpu_possible(const struct cpumask *src)
+{
+ cpumask_copy(to_cpumask(cpu_possible_bits), src);
+}
+
+void init_cpu_online(const struct cpumask *src)
+{
+ cpumask_copy(to_cpumask(cpu_online_bits), src);
+}
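Not part of the patch: a sketch of how architecture setup code is expected to use the new accessors added above instead of writing to the old cpu_*_map variables directly (example_register_cpus and its ncpus parameter are invented for illustration):

#include <linux/cpumask.h>
#include <linux/init.h>

static void __init example_register_cpus(unsigned int ncpus)
{
	unsigned int cpu;

	init_cpu_possible(cpumask_of(0));	/* start from the boot CPU only */

	for (cpu = 0; cpu < ncpus; cpu++) {
		set_cpu_possible(cpu, true);	/* may be brought up later */
		set_cpu_present(cpu, true);	/* physically present now */
	}
	set_cpu_online(0, true);		/* the boot CPU is already running */
}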
diff --git a/kernel/cred-internals.h b/kernel/cred-internals.h
new file mode 100644
index 000000000000..2dc4fc2d0bf1
--- /dev/null
+++ b/kernel/cred-internals.h
@@ -0,0 +1,21 @@
+/* Internal credentials stuff
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+/*
+ * user.c
+ */
+static inline void sched_switch_user(struct task_struct *p)
+{
+#ifdef CONFIG_USER_SCHED
+ sched_move_task(p);
+#endif /* CONFIG_USER_SCHED */
+}
+
diff --git a/kernel/cred.c b/kernel/cred.c
new file mode 100644
index 000000000000..ff7bc071991c
--- /dev/null
+++ b/kernel/cred.c
@@ -0,0 +1,588 @@
+/* Task credentials management - see Documentation/credentials.txt
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/cred.h>
+#include <linux/sched.h>
+#include <linux/key.h>
+#include <linux/keyctl.h>
+#include <linux/init_task.h>
+#include <linux/security.h>
+#include <linux/cn_proc.h>
+#include "cred-internals.h"
+
+static struct kmem_cache *cred_jar;
+
+/*
+ * The common credentials for the initial task's thread group
+ */
+#ifdef CONFIG_KEYS
+static struct thread_group_cred init_tgcred = {
+ .usage = ATOMIC_INIT(2),
+ .tgid = 0,
+ .lock = SPIN_LOCK_UNLOCKED,
+};
+#endif
+
+/*
+ * The initial credentials for the initial task
+ */
+struct cred init_cred = {
+ .usage = ATOMIC_INIT(4),
+ .securebits = SECUREBITS_DEFAULT,
+ .cap_inheritable = CAP_INIT_INH_SET,
+ .cap_permitted = CAP_FULL_SET,
+ .cap_effective = CAP_INIT_EFF_SET,
+ .cap_bset = CAP_INIT_BSET,
+ .user = INIT_USER,
+ .group_info = &init_groups,
+#ifdef CONFIG_KEYS
+ .tgcred = &init_tgcred,
+#endif
+};
+
+/*
+ * Dispose of the shared task group credentials
+ */
+#ifdef CONFIG_KEYS
+static void release_tgcred_rcu(struct rcu_head *rcu)
+{
+ struct thread_group_cred *tgcred =
+ container_of(rcu, struct thread_group_cred, rcu);
+
+ BUG_ON(atomic_read(&tgcred->usage) != 0);
+
+ key_put(tgcred->session_keyring);
+ key_put(tgcred->process_keyring);
+ kfree(tgcred);
+}
+#endif
+
+/*
+ * Release a set of thread group credentials.
+ */
+static void release_tgcred(struct cred *cred)
+{
+#ifdef CONFIG_KEYS
+ struct thread_group_cred *tgcred = cred->tgcred;
+
+ if (atomic_dec_and_test(&tgcred->usage))
+ call_rcu(&tgcred->rcu, release_tgcred_rcu);
+#endif
+}
+
+/*
+ * The RCU callback to actually dispose of a set of credentials
+ */
+static void put_cred_rcu(struct rcu_head *rcu)
+{
+ struct cred *cred = container_of(rcu, struct cred, rcu);
+
+ if (atomic_read(&cred->usage) != 0)
+ panic("CRED: put_cred_rcu() sees %p with usage %d\n",
+ cred, atomic_read(&cred->usage));
+
+ security_cred_free(cred);
+ key_put(cred->thread_keyring);
+ key_put(cred->request_key_auth);
+ release_tgcred(cred);
+ put_group_info(cred->group_info);
+ free_uid(cred->user);
+ kmem_cache_free(cred_jar, cred);
+}
+
+/**
+ * __put_cred - Destroy a set of credentials
+ * @cred: The record to release
+ *
+ * Destroy a set of credentials on which no references remain.
+ */
+void __put_cred(struct cred *cred)
+{
+ BUG_ON(atomic_read(&cred->usage) != 0);
+
+ call_rcu(&cred->rcu, put_cred_rcu);
+}
+EXPORT_SYMBOL(__put_cred);
+
+/**
+ * prepare_creds - Prepare a new set of credentials for modification
+ *
+ * Prepare a new set of task credentials for modification. A task's creds
+ * shouldn't generally be modified directly, therefore this function is used to
+ * prepare a new copy, which the caller then modifies and then commits by
+ * calling commit_creds().
+ *
+ * Preparation involves making a copy of the objective creds for modification.
+ *
+ * Returns a pointer to the new creds-to-be if successful, NULL otherwise.
+ *
+ * Call commit_creds() or abort_creds() to clean up.
+ */
+struct cred *prepare_creds(void)
+{
+ struct task_struct *task = current;
+ const struct cred *old;
+ struct cred *new;
+
+ BUG_ON(atomic_read(&task->real_cred->usage) < 1);
+
+ new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ old = task->cred;
+ memcpy(new, old, sizeof(struct cred));
+
+ atomic_set(&new->usage, 1);
+ get_group_info(new->group_info);
+ get_uid(new->user);
+
+#ifdef CONFIG_KEYS
+ key_get(new->thread_keyring);
+ key_get(new->request_key_auth);
+ atomic_inc(&new->tgcred->usage);
+#endif
+
+#ifdef CONFIG_SECURITY
+ new->security = NULL;
+#endif
+
+ if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
+ goto error;
+ return new;
+
+error:
+ abort_creds(new);
+ return NULL;
+}
+EXPORT_SYMBOL(prepare_creds);
+
+/*
+ * Prepare credentials for current to perform an execve()
+ * - The caller must hold current->cred_exec_mutex
+ */
+struct cred *prepare_exec_creds(void)
+{
+ struct thread_group_cred *tgcred = NULL;
+ struct cred *new;
+
+#ifdef CONFIG_KEYS
+ tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
+ if (!tgcred)
+ return NULL;
+#endif
+
+ new = prepare_creds();
+ if (!new) {
+ kfree(tgcred);
+ return new;
+ }
+
+#ifdef CONFIG_KEYS
+ /* newly exec'd tasks don't get a thread keyring */
+ key_put(new->thread_keyring);
+ new->thread_keyring = NULL;
+
+ /* create a new per-thread-group creds for all this set of threads to
+ * share */
+ memcpy(tgcred, new->tgcred, sizeof(struct thread_group_cred));
+
+ atomic_set(&tgcred->usage, 1);
+ spin_lock_init(&tgcred->lock);
+
+ /* inherit the session keyring; new process keyring */
+ key_get(tgcred->session_keyring);
+ tgcred->process_keyring = NULL;
+
+ release_tgcred(new);
+ new->tgcred = tgcred;
+#endif
+
+ return new;
+}
+
+/*
+ * prepare new credentials for the usermode helper dispatcher
+ */
+struct cred *prepare_usermodehelper_creds(void)
+{
+#ifdef CONFIG_KEYS
+ struct thread_group_cred *tgcred = NULL;
+#endif
+ struct cred *new;
+
+#ifdef CONFIG_KEYS
+ tgcred = kzalloc(sizeof(*new->tgcred), GFP_ATOMIC);
+ if (!tgcred)
+ return NULL;
+#endif
+
+ new = kmem_cache_alloc(cred_jar, GFP_ATOMIC);
+ if (!new)
+ return NULL;
+
+ memcpy(new, &init_cred, sizeof(struct cred));
+
+ atomic_set(&new->usage, 1);
+ get_group_info(new->group_info);
+ get_uid(new->user);
+
+#ifdef CONFIG_KEYS
+ new->thread_keyring = NULL;
+ new->request_key_auth = NULL;
+ new->jit_keyring = KEY_REQKEY_DEFL_DEFAULT;
+
+ atomic_set(&tgcred->usage, 1);
+ spin_lock_init(&tgcred->lock);
+ new->tgcred = tgcred;
+#endif
+
+#ifdef CONFIG_SECURITY
+ new->security = NULL;
+#endif
+ if (security_prepare_creds(new, &init_cred, GFP_ATOMIC) < 0)
+ goto error;
+
+ BUG_ON(atomic_read(&new->usage) != 1);
+ return new;
+
+error:
+ put_cred(new);
+ return NULL;
+}
+
+/*
+ * Copy credentials for the new process created by fork()
+ *
+ * We share if we can, but under some circumstances we have to generate a new
+ * set.
+ *
+ * The new process gets the current process's subjective credentials as its
+ * objective and subjective credentials
+ */
+int copy_creds(struct task_struct *p, unsigned long clone_flags)
+{
+#ifdef CONFIG_KEYS
+ struct thread_group_cred *tgcred;
+#endif
+ struct cred *new;
+ int ret;
+
+ mutex_init(&p->cred_exec_mutex);
+
+ if (
+#ifdef CONFIG_KEYS
+ !p->cred->thread_keyring &&
+#endif
+ clone_flags & CLONE_THREAD
+ ) {
+ p->real_cred = get_cred(p->cred);
+ get_cred(p->cred);
+ atomic_inc(&p->cred->user->processes);
+ return 0;
+ }
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+ if (clone_flags & CLONE_NEWUSER) {
+ ret = create_user_ns(new);
+ if (ret < 0)
+ goto error_put;
+ }
+
+#ifdef CONFIG_KEYS
+ /* new threads get their own thread keyrings if their parent already
+ * had one */
+ if (new->thread_keyring) {
+ key_put(new->thread_keyring);
+ new->thread_keyring = NULL;
+ if (clone_flags & CLONE_THREAD)
+ install_thread_keyring_to_cred(new);
+ }
+
+ /* we share the process and session keyrings between all the threads in
+ * a process - this is slightly icky as we violate COW credentials a
+ * bit */
+ if (!(clone_flags & CLONE_THREAD)) {
+ tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
+ if (!tgcred) {
+ ret = -ENOMEM;
+ goto error_put;
+ }
+ atomic_set(&tgcred->usage, 1);
+ spin_lock_init(&tgcred->lock);
+ tgcred->process_keyring = NULL;
+ tgcred->session_keyring = key_get(new->tgcred->session_keyring);
+
+ release_tgcred(new);
+ new->tgcred = tgcred;
+ }
+#endif
+
+ atomic_inc(&new->user->processes);
+ p->cred = p->real_cred = get_cred(new);
+ return 0;
+
+error_put:
+ put_cred(new);
+ return ret;
+}
+
+/**
+ * commit_creds - Install new credentials upon the current task
+ * @new: The credentials to be assigned
+ *
+ * Install a new set of credentials to the current task, using RCU to replace
+ * the old set. Both the objective and the subjective credentials pointers are
+ * updated. This function may not be called if the subjective credentials are
+ * in an overridden state.
+ *
+ * This function eats the caller's reference to the new credentials.
+ *
+ * Always returns 0 thus allowing this function to be tail-called at the end
+ * of, say, sys_setgid().
+ */
+int commit_creds(struct cred *new)
+{
+ struct task_struct *task = current;
+ const struct cred *old;
+
+ BUG_ON(task->cred != task->real_cred);
+ BUG_ON(atomic_read(&task->real_cred->usage) < 2);
+ BUG_ON(atomic_read(&new->usage) < 1);
+
+ old = task->real_cred;
+ security_commit_creds(new, old);
+
+ get_cred(new); /* we will require a ref for the subj creds too */
+
+ /* dumpability changes */
+ if (old->euid != new->euid ||
+ old->egid != new->egid ||
+ old->fsuid != new->fsuid ||
+ old->fsgid != new->fsgid ||
+ !cap_issubset(new->cap_permitted, old->cap_permitted)) {
+ set_dumpable(task->mm, suid_dumpable);
+ task->pdeath_signal = 0;
+ smp_wmb();
+ }
+
+ /* alter the thread keyring */
+ if (new->fsuid != old->fsuid)
+ key_fsuid_changed(task);
+ if (new->fsgid != old->fsgid)
+ key_fsgid_changed(task);
+
+ /* do it
+ * - What if a process setreuid()'s and this brings the
+ * new uid over his NPROC rlimit? We can check this now
+ * cheaply with the new uid cache, so if it matters
+ * we should be checking for it. -DaveM
+ */
+ if (new->user != old->user)
+ atomic_inc(&new->user->processes);
+ rcu_assign_pointer(task->real_cred, new);
+ rcu_assign_pointer(task->cred, new);
+ if (new->user != old->user)
+ atomic_dec(&old->user->processes);
+
+ sched_switch_user(task);
+
+ /* send notifications */
+ if (new->uid != old->uid ||
+ new->euid != old->euid ||
+ new->suid != old->suid ||
+ new->fsuid != old->fsuid)
+ proc_id_connector(task, PROC_EVENT_UID);
+
+ if (new->gid != old->gid ||
+ new->egid != old->egid ||
+ new->sgid != old->sgid ||
+ new->fsgid != old->fsgid)
+ proc_id_connector(task, PROC_EVENT_GID);
+
+ /* release the old obj and subj refs both */
+ put_cred(old);
+ put_cred(old);
+ return 0;
+}
+EXPORT_SYMBOL(commit_creds);
+
+/**
+ * abort_creds - Discard a set of credentials and unlock the current task
+ * @new: The credentials that were going to be applied
+ *
+ * Discard a set of credentials that were under construction and unlock the
+ * current task.
+ */
+void abort_creds(struct cred *new)
+{
+ BUG_ON(atomic_read(&new->usage) < 1);
+ put_cred(new);
+}
+EXPORT_SYMBOL(abort_creds);
+
+/**
+ * override_creds - Override the current process's subjective credentials
+ * @new: The credentials to be assigned
+ *
+ * Install a set of temporary override subjective credentials on the current
+ * process, returning the old set for later reversion.
+ */
+const struct cred *override_creds(const struct cred *new)
+{
+ const struct cred *old = current->cred;
+
+ rcu_assign_pointer(current->cred, get_cred(new));
+ return old;
+}
+EXPORT_SYMBOL(override_creds);
+
+/**
+ * revert_creds - Revert a temporary subjective credentials override
+ * @old: The credentials to be restored
+ *
+ * Revert a temporary set of override subjective credentials to an old set,
+ * discarding the override set.
+ */
+void revert_creds(const struct cred *old)
+{
+ const struct cred *override = current->cred;
+
+ rcu_assign_pointer(current->cred, old);
+ put_cred(override);
+}
+EXPORT_SYMBOL(revert_creds);
+
+/*
+ * initialise the credentials stuff
+ */
+void __init cred_init(void)
+{
+ /* allocate a slab in which we can store credentials */
+ cred_jar = kmem_cache_create("cred_jar", sizeof(struct cred),
+ 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+}
+
+/**
+ * prepare_kernel_cred - Prepare a set of credentials for a kernel service
+ * @daemon: A userspace daemon to be used as a reference
+ *
+ * Prepare a set of credentials for a kernel service. This can then be used to
+ * override a task's own credentials so that work can be done on behalf of that
+ * task that requires a different subjective context.
+ *
+ * @daemon is used to provide a base for the security record, but can be NULL.
+ * If @daemon is supplied, then the security data will be derived from that;
+ * otherwise the UIDs and GIDs are set to 0, with no supplementary groups,
+ * full capabilities and no keys.
+ *
+ * The caller may change these controls afterwards if desired.
+ *
+ * Returns the new credentials or NULL if out of memory.
+ *
+ * Does not take, and does not return holding current->cred_replace_mutex.
+ */
+struct cred *prepare_kernel_cred(struct task_struct *daemon)
+{
+ const struct cred *old;
+ struct cred *new;
+
+ new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ if (daemon)
+ old = get_task_cred(daemon);
+ else
+ old = get_cred(&init_cred);
+
+ get_uid(new->user);
+ get_group_info(new->group_info);
+
+#ifdef CONFIG_KEYS
+ atomic_inc(&init_tgcred.usage);
+ new->tgcred = &init_tgcred;
+ new->request_key_auth = NULL;
+ new->thread_keyring = NULL;
+ new->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
+#endif
+
+#ifdef CONFIG_SECURITY
+ new->security = NULL;
+#endif
+ if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
+ goto error;
+
+ atomic_set(&new->usage, 1);
+ put_cred(old);
+ return new;
+
+error:
+ put_cred(new);
+ return NULL;
+}
+EXPORT_SYMBOL(prepare_kernel_cred);
+
+/**
+ * set_security_override - Set the security ID in a set of credentials
+ * @new: The credentials to alter
+ * @secid: The LSM security ID to set
+ *
+ * Set the LSM security ID in a set of credentials so that the subjective
+ * security is overridden when an alternative set of credentials is used.
+ */
+int set_security_override(struct cred *new, u32 secid)
+{
+ return security_kernel_act_as(new, secid);
+}
+EXPORT_SYMBOL(set_security_override);
+
+/**
+ * set_security_override_from_ctx - Set the security ID in a set of credentials
+ * @new: The credentials to alter
+ * @secctx: The LSM security context to generate the security ID from.
+ *
+ * Set the LSM security ID in a set of credentials so that the subjective
+ * security is overridden when an alternative set of credentials is used. The
+ * security ID is specified in string form as a security context to be
+ * interpreted by the LSM.
+ */
+int set_security_override_from_ctx(struct cred *new, const char *secctx)
+{
+ u32 secid;
+ int ret;
+
+ ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
+ if (ret < 0)
+ return ret;
+
+ return set_security_override(new, secid);
+}
+EXPORT_SYMBOL(set_security_override_from_ctx);
+
+/**
+ * set_create_files_as - Set the LSM file create context in a set of credentials
+ * @new: The credentials to alter
+ * @inode: The inode to take the context from
+ *
+ * Change the LSM file creation context in a set of credentials to be the same
+ * as the object context of the specified inode, so that the new inodes have
+ * the same MAC context as that inode.
+ */
+int set_create_files_as(struct cred *new, struct inode *inode)
+{
+ new->fsuid = inode->i_uid;
+ new->fsgid = inode->i_gid;
+ return security_kernel_create_files_as(new, inode);
+}
+EXPORT_SYMBOL(set_create_files_as);
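A kernel service would typically combine these helpers when constructing its credentials. The sketch below is illustrative only, and the SELinux-style context string is a made-up example:

    struct cred *new;
    int ret;

    new = prepare_kernel_cred(NULL);
    if (!new)
            return -ENOMEM;

    ret = set_security_override_from_ctx(new, "system_u:system_r:kernel_service_t:s0");
    if (ret < 0) {
            put_cred(new);
            return ret;
    }
    /* the service later wraps its work in override_creds(new) / revert_creds() */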
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index b3179dad71be..abb6e17505e2 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -127,7 +127,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
*/
t1 = tsk->sched_info.pcount;
t2 = tsk->sched_info.run_delay;
- t3 = tsk->sched_info.cpu_time;
+ t3 = tsk->se.sum_exec_runtime;
d->cpu_count += t1;
diff --git a/kernel/exit.c b/kernel/exit.c
index 61ba5b4b10cf..c9e5a1c14e08 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -46,12 +46,14 @@
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
+#include <linux/init_task.h>
#include <trace/sched.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
+#include "cred-internals.h"
DEFINE_TRACE(sched_process_free);
DEFINE_TRACE(sched_process_exit);
@@ -168,7 +170,10 @@ void release_task(struct task_struct * p)
int zap_leader;
repeat:
tracehook_prepare_release_task(p);
- atomic_dec(&p->user->processes);
+ /* don't need to get the RCU readlock here - the process is dead and
+ * can't be modifying its own credentials */
+ atomic_dec(&__task_cred(p)->user->processes);
+
proc_flush_task(p);
write_lock_irq(&tasklist_lock);
tracehook_finish_release_task(p);
@@ -343,12 +348,12 @@ static void reparent_to_kthreadd(void)
/* cpus_allowed? */
/* rt_priority? */
/* signals? */
- security_task_reparent_to_init(current);
memcpy(current->signal->rlim, init_task.signal->rlim,
sizeof(current->signal->rlim));
- atomic_inc(&(INIT_USER->__count));
+
+ atomic_inc(&init_cred.usage);
+ commit_creds(&init_cred);
write_unlock_irq(&tasklist_lock);
- switch_uid(INIT_USER);
}
void __set_special_pids(struct pid *pid)
@@ -1032,8 +1037,6 @@ NORET_TYPE void do_exit(long code)
* task into the wait for ever nirwana as well.
*/
tsk->flags |= PF_EXITPIDONE;
- if (tsk->io_context)
- exit_io_context();
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
}
@@ -1082,7 +1085,6 @@ NORET_TYPE void do_exit(long code)
check_stack_usage();
exit_thread();
cgroup_exit(tsk, 1);
- exit_keys(tsk);
if (group_dead && tsk->signal->leader)
disassociate_ctty(1);
@@ -1266,12 +1268,12 @@ static int wait_task_zombie(struct task_struct *p, int options,
unsigned long state;
int retval, status, traced;
pid_t pid = task_pid_vnr(p);
+ uid_t uid = __task_cred(p)->uid;
if (!likely(options & WEXITED))
return 0;
if (unlikely(options & WNOWAIT)) {
- uid_t uid = p->uid;
int exit_code = p->exit_code;
int why, status;
@@ -1392,7 +1394,7 @@ static int wait_task_zombie(struct task_struct *p, int options,
if (!retval && infop)
retval = put_user(pid, &infop->si_pid);
if (!retval && infop)
- retval = put_user(p->uid, &infop->si_uid);
+ retval = put_user(uid, &infop->si_uid);
if (!retval)
retval = pid;
@@ -1457,7 +1459,8 @@ static int wait_task_stopped(int ptrace, struct task_struct *p,
if (!unlikely(options & WNOWAIT))
p->exit_code = 0;
- uid = p->uid;
+ /* don't need the RCU readlock here as we're holding a spinlock */
+ uid = __task_cred(p)->uid;
unlock_sig:
spin_unlock_irq(&p->sighand->siglock);
if (!exit_code)
@@ -1531,10 +1534,10 @@ static int wait_task_continued(struct task_struct *p, int options,
}
if (!unlikely(options & WNOWAIT))
p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
+ uid = __task_cred(p)->uid;
spin_unlock_irq(&p->sighand->siglock);
pid = task_pid_vnr(p);
- uid = p->uid;
get_task_struct(p);
read_unlock(&tasklist_lock);
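The exit-path conversions above replace direct p->uid reads with the credential accessors. Where no other exclusion applies (siglock, tasklist_lock, or a known-dead task), the general pattern for peeking at another task's credentials is an RCU read-side critical section, roughly:

    const struct cred *cred;
    uid_t uid;

    rcu_read_lock();
    cred = __task_cred(p);          /* only valid inside the RCU read lock */
    uid = cred->uid;
    rcu_read_unlock();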
diff --git a/kernel/fork.c b/kernel/fork.c
index 7b93da72d4a2..43cbf30669e6 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -151,9 +151,8 @@ void __put_task_struct(struct task_struct *tsk)
WARN_ON(atomic_read(&tsk->usage));
WARN_ON(tsk == current);
- security_task_free(tsk);
- free_uid(tsk->user);
- put_group_info(tsk->group_info);
+ put_cred(tsk->real_cred);
+ put_cred(tsk->cred);
delayacct_tsk_free(tsk);
if (!profile_handoff_task(tsk))
@@ -416,8 +415,8 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
set_mm_counter(mm, file_rss, 0);
set_mm_counter(mm, anon_rss, 0);
spin_lock_init(&mm->page_table_lock);
- rwlock_init(&mm->ioctx_list_lock);
- mm->ioctx_list = NULL;
+ spin_lock_init(&mm->ioctx_lock);
+ INIT_HLIST_HEAD(&mm->ioctx_list);
mm->free_area_cache = TASK_UNMAPPED_BASE;
mm->cached_hole_size = ~0UL;
mm_init_owner(mm, p);
@@ -822,12 +821,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
if (!sig)
return -ENOMEM;
- ret = copy_thread_group_keys(tsk);
- if (ret < 0) {
- kmem_cache_free(signal_cachep, sig);
- return ret;
- }
-
atomic_set(&sig->count, 1);
atomic_set(&sig->live, 1);
init_waitqueue_head(&sig->wait_chldexit);
@@ -872,7 +865,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
void __cleanup_signal(struct signal_struct *sig)
{
thread_group_cputime_free(sig);
- exit_thread_group_keys(sig);
tty_kref_put(sig->tty);
kmem_cache_free(signal_cachep, sig);
}
@@ -988,16 +980,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
retval = -EAGAIN;
- if (atomic_read(&p->user->processes) >=
+ if (atomic_read(&p->real_cred->user->processes) >=
p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
- p->user != current->nsproxy->user_ns->root_user)
+ p->real_cred->user != INIT_USER)
goto bad_fork_free;
}
- atomic_inc(&p->user->__count);
- atomic_inc(&p->user->processes);
- get_group_info(p->group_info);
+ retval = copy_creds(p, clone_flags);
+ if (retval < 0)
+ goto bad_fork_free;
/*
* If multiple threads are within copy_process(), then this check
@@ -1052,10 +1044,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
do_posix_clock_monotonic_gettime(&p->start_time);
p->real_start_time = p->start_time;
monotonic_to_bootbased(&p->real_start_time);
-#ifdef CONFIG_SECURITY
- p->security = NULL;
-#endif
- p->cap_bset = current->cap_bset;
p->io_context = NULL;
p->audit_context = NULL;
cgroup_fork(p);
@@ -1096,14 +1084,14 @@ static struct task_struct *copy_process(unsigned long clone_flags,
#ifdef CONFIG_DEBUG_MUTEXES
p->blocked_on = NULL; /* not blocked yet */
#endif
+ if (unlikely(ptrace_reparented(current)))
+ ptrace_fork(p, clone_flags);
/* Perform scheduler related setup. Assign this task to a CPU. */
sched_fork(p, clone_flags);
- if ((retval = security_task_alloc(p)))
- goto bad_fork_cleanup_policy;
if ((retval = audit_alloc(p)))
- goto bad_fork_cleanup_security;
+ goto bad_fork_cleanup_policy;
/* copy all the process information */
if ((retval = copy_semundo(clone_flags, p)))
goto bad_fork_cleanup_audit;
@@ -1117,10 +1105,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
goto bad_fork_cleanup_sighand;
if ((retval = copy_mm(clone_flags, p)))
goto bad_fork_cleanup_signal;
- if ((retval = copy_keys(clone_flags, p)))
- goto bad_fork_cleanup_mm;
if ((retval = copy_namespaces(clone_flags, p)))
- goto bad_fork_cleanup_keys;
+ goto bad_fork_cleanup_mm;
if ((retval = copy_io(clone_flags, p)))
goto bad_fork_cleanup_namespaces;
retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
@@ -1289,8 +1275,6 @@ bad_fork_cleanup_io:
put_io_context(p->io_context);
bad_fork_cleanup_namespaces:
exit_task_namespaces(p);
-bad_fork_cleanup_keys:
- exit_keys(p);
bad_fork_cleanup_mm:
if (p->mm)
mmput(p->mm);
@@ -1306,8 +1290,6 @@ bad_fork_cleanup_semundo:
exit_sem(p);
bad_fork_cleanup_audit:
audit_free(p);
-bad_fork_cleanup_security:
- security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
mpol_put(p->mempolicy);
@@ -1320,9 +1302,9 @@ bad_fork_cleanup_cgroup:
bad_fork_cleanup_put_domain:
module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
- put_group_info(p->group_info);
- atomic_dec(&p->user->processes);
- free_uid(p->user);
+ atomic_dec(&p->cred->user->processes);
+ put_cred(p->real_cred);
+ put_cred(p->cred);
bad_fork_free:
free_task(p);
fork_out:
@@ -1366,6 +1348,21 @@ long do_fork(unsigned long clone_flags,
long nr;
/*
+ * Do some preliminary argument and permissions checking before we
+ * actually start allocating stuff
+ */
+ if (clone_flags & CLONE_NEWUSER) {
+ if (clone_flags & CLONE_THREAD)
+ return -EINVAL;
+ /* hopefully this check will go away when userns support is
+ * complete
+ */
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
+ !capable(CAP_SETGID))
+ return -EPERM;
+ }
+
+ /*
* We hope to recycle these flags after 2.6.26
*/
if (unlikely(clone_flags & CLONE_STOPPED)) {
@@ -1613,8 +1610,7 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
err = -EINVAL;
if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
- CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWUSER|
- CLONE_NEWNET))
+ CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
goto bad_unshare_out;
/*
diff --git a/kernel/futex.c b/kernel/futex.c
index e10c5c8786a6..7c6cbabe52b3 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -92,11 +92,12 @@ struct futex_pi_state {
* A futex_q has a woken state, just like tasks have TASK_RUNNING.
* It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
- * wake up q->waiters, then make the second condition true.
+ * wake up q->waiter, then make the second condition true.
*/
struct futex_q {
struct plist_node list;
- wait_queue_head_t waiters;
+ /* There can only be a single waiter */
+ wait_queue_head_t waiter;
/* Which hash list lock to use: */
spinlock_t *lock_ptr;
@@ -405,13 +406,20 @@ static void free_pi_state(struct futex_pi_state *pi_state)
static struct task_struct * futex_find_get_task(pid_t pid)
{
struct task_struct *p;
+ const struct cred *cred = current_cred(), *pcred;
rcu_read_lock();
p = find_task_by_vpid(pid);
- if (!p || ((current->euid != p->euid) && (current->euid != p->uid)))
+ if (!p) {
p = ERR_PTR(-ESRCH);
- else
- get_task_struct(p);
+ } else {
+ pcred = __task_cred(p);
+ if (cred->euid != pcred->euid &&
+ cred->euid != pcred->uid)
+ p = ERR_PTR(-ESRCH);
+ else
+ get_task_struct(p);
+ }
rcu_read_unlock();
@@ -573,7 +581,7 @@ static void wake_futex(struct futex_q *q)
* The lock in wake_up_all() is a crucial memory barrier after the
* plist_del() and also before assigning to q->lock_ptr.
*/
- wake_up_all(&q->waiters);
+ wake_up(&q->waiter);
/*
* The waiting task can free the futex_q as soon as this is written,
* without taking any locks. This must come last.
@@ -930,7 +938,7 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
{
struct futex_hash_bucket *hb;
- init_waitqueue_head(&q->waiters);
+ init_waitqueue_head(&q->waiter);
get_futex_key_refs(&q->key);
hb = hash_futex(&q->key);
@@ -1142,12 +1150,13 @@ handle_fault:
* In case we must use restart_block to restart a futex_wait,
* we encode in the 'flags' shared capability
*/
-#define FLAGS_SHARED 1
+#define FLAGS_SHARED 0x01
+#define FLAGS_CLOCKRT 0x02
static long futex_wait_restart(struct restart_block *restart);
static int futex_wait(u32 __user *uaddr, int fshared,
- u32 val, ktime_t *abs_time, u32 bitset)
+ u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
{
struct task_struct *curr = current;
DECLARE_WAITQUEUE(wait, curr);
@@ -1220,7 +1229,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
/* add_wait_queue is the barrier after __set_current_state. */
__set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&q.waiters, &wait);
+ add_wait_queue(&q.waiter, &wait);
/*
* !plist_node_empty() is safe here without any lock.
* q.lock_ptr != 0 is not safe, because of ordering against wakeup.
@@ -1233,8 +1242,10 @@ static int futex_wait(u32 __user *uaddr, int fshared,
slack = current->timer_slack_ns;
if (rt_task(current))
slack = 0;
- hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS);
+ hrtimer_init_on_stack(&t.timer,
+ clockrt ? CLOCK_REALTIME :
+ CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
hrtimer_init_sleeper(&t, current);
hrtimer_set_expires_range_ns(&t.timer, *abs_time, slack);
@@ -1289,6 +1300,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
if (fshared)
restart->futex.flags |= FLAGS_SHARED;
+ if (clockrt)
+ restart->futex.flags |= FLAGS_CLOCKRT;
return -ERESTART_RESTARTBLOCK;
}
@@ -1312,7 +1325,8 @@ static long futex_wait_restart(struct restart_block *restart)
if (restart->futex.flags & FLAGS_SHARED)
fshared = 1;
return (long)futex_wait(uaddr, fshared, restart->futex.val, &t,
- restart->futex.bitset);
+ restart->futex.bitset,
+ restart->futex.flags & FLAGS_CLOCKRT);
}
@@ -1558,12 +1572,11 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
uaddr_faulted:
/*
- * We have to r/w *(int __user *)uaddr, but we can't modify it
- * non-atomically. Therefore, if get_user below is not
- * enough, we need to handle the fault ourselves, while
- * still holding the mmap_sem.
- *
- * ... and hb->lock. :-) --ANK
+ * We have to r/w *(int __user *)uaddr, and we have to modify it
+ * atomically. Therefore, if we continue to fault after get_user()
+ * below, we need to handle the fault ourselves, while still holding
+ * the mmap_sem. This can occur if the uaddr is under contention as
+ * we have to drop the mmap_sem in order to call get_user().
*/
queue_unlock(&q, hb);
@@ -1575,7 +1588,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
}
ret = get_user(uval, uaddr);
- if (!ret && (uval != -EFAULT))
+ if (!ret)
goto retry;
if (to)
@@ -1669,12 +1682,11 @@ out:
pi_faulted:
/*
- * We have to r/w *(int __user *)uaddr, but we can't modify it
- * non-atomically. Therefore, if get_user below is not
- * enough, we need to handle the fault ourselves, while
- * still holding the mmap_sem.
- *
- * ... and hb->lock. --ANK
+ * We have to r/w *(int __user *)uaddr, and we have to modify it
+ * atomically. Therefore, if we continue to fault after get_user()
+ * below, we need to handle the fault ourselves, while still holding
+ * the mmap_sem. This can occur if the uaddr is under contention as
+ * we have to drop the mmap_sem in order to call get_user().
*/
spin_unlock(&hb->lock);
@@ -1687,7 +1699,7 @@ pi_faulted:
}
ret = get_user(uval, uaddr);
- if (!ret && (uval != -EFAULT))
+ if (!ret)
goto retry;
return ret;
@@ -1742,6 +1754,7 @@ sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
{
struct robust_list_head __user *head;
unsigned long ret;
+ const struct cred *cred = current_cred(), *pcred;
if (!futex_cmpxchg_enabled)
return -ENOSYS;
@@ -1757,8 +1770,10 @@ sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
if (!p)
goto err_unlock;
ret = -EPERM;
- if ((current->euid != p->euid) && (current->euid != p->uid) &&
- !capable(CAP_SYS_PTRACE))
+ pcred = __task_cred(p);
+ if (cred->euid != pcred->euid &&
+ cred->euid != pcred->uid &&
+ !capable(CAP_SYS_PTRACE))
goto err_unlock;
head = p->robust_list;
rcu_read_unlock();
@@ -1905,18 +1920,22 @@ void exit_robust_list(struct task_struct *curr)
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
u32 __user *uaddr2, u32 val2, u32 val3)
{
- int ret = -ENOSYS;
+ int clockrt, ret = -ENOSYS;
int cmd = op & FUTEX_CMD_MASK;
int fshared = 0;
if (!(op & FUTEX_PRIVATE_FLAG))
fshared = 1;
+ clockrt = op & FUTEX_CLOCK_REALTIME;
+ if (clockrt && cmd != FUTEX_WAIT_BITSET)
+ return -ENOSYS;
+
switch (cmd) {
case FUTEX_WAIT:
val3 = FUTEX_BITSET_MATCH_ANY;
case FUTEX_WAIT_BITSET:
- ret = futex_wait(uaddr, fshared, val, timeout, val3);
+ ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
break;
case FUTEX_WAKE:
val3 = FUTEX_BITSET_MATCH_ANY;
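From userspace, the new FUTEX_CLOCK_REALTIME bit is honoured only together with FUTEX_WAIT_BITSET, where the timeout is an absolute deadline. A sketch of such a call, assuming headers new enough to define the flag:

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdint.h>
    #include <time.h>

    /* Wait while *uaddr == val, until an absolute CLOCK_REALTIME deadline. */
    static int futex_wait_abs_realtime(uint32_t *uaddr, uint32_t val,
                                       const struct timespec *deadline)
    {
            return syscall(SYS_futex, uaddr,
                           FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME,
                           val, deadline, NULL, FUTEX_BITSET_MATCH_ANY);
    }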
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 04ac3a9e42cf..d607a5b9ee29 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -135,6 +135,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
{
struct compat_robust_list_head __user *head;
unsigned long ret;
+ const struct cred *cred = current_cred(), *pcred;
if (!futex_cmpxchg_enabled)
return -ENOSYS;
@@ -150,8 +151,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
if (!p)
goto err_unlock;
ret = -EPERM;
- if ((current->euid != p->euid) && (current->euid != p->uid) &&
- !capable(CAP_SYS_PTRACE))
+ pcred = __task_cred(p);
+ if (cred->euid != pcred->euid &&
+ cred->euid != pcred->uid &&
+ !capable(CAP_SYS_PTRACE))
goto err_unlock;
head = p->compat_robust_list;
read_unlock(&tasklist_lock);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 47e63349d1b2..bda9cb924276 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -442,22 +442,6 @@ static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif
-/*
- * Check, whether the timer is on the callback pending list
- */
-static inline int hrtimer_cb_pending(const struct hrtimer *timer)
-{
- return timer->state & HRTIMER_STATE_PENDING;
-}
-
-/*
- * Remove a timer from the callback pending list
- */
-static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
-{
- list_del_init(&timer->cb_entry);
-}
-
/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -651,6 +635,8 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
{
}
+static void __run_hrtimer(struct hrtimer *timer);
+
/*
* When High resolution timers are active, try to reprogram. Note, that in case
* the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
@@ -661,31 +647,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
struct hrtimer_clock_base *base)
{
if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-
- /* Timer is expired, act upon the callback mode */
- switch(timer->cb_mode) {
- case HRTIMER_CB_IRQSAFE_PERCPU:
- case HRTIMER_CB_IRQSAFE_UNLOCKED:
- /*
- * This is solely for the sched tick emulation with
- * dynamic tick support to ensure that we do not
- * restart the tick right on the edge and end up with
- * the tick timer in the softirq ! The calling site
- * takes care of this. Also used for hrtimer sleeper !
- */
- debug_hrtimer_deactivate(timer);
- return 1;
- case HRTIMER_CB_SOFTIRQ:
- /*
- * Move everything else into the softirq pending list !
- */
- list_add_tail(&timer->cb_entry,
- &base->cpu_base->cb_pending);
- timer->state = HRTIMER_STATE_PENDING;
- return 1;
- default:
- BUG();
- }
+ /*
+ * XXX: recursion check?
+ * hrtimer_forward() should round up with timer granularity
+ * so that we never get into infinite recursion here;
+ * it doesn't do that at the moment, though.
+ */
+ __run_hrtimer(timer);
+ return 1;
}
return 0;
}
@@ -724,11 +693,6 @@ static int hrtimer_switch_to_hres(void)
return 1;
}
-static inline void hrtimer_raise_softirq(void)
-{
- raise_softirq(HRTIMER_SOFTIRQ);
-}
-
#else
static inline int hrtimer_hres_active(void) { return 0; }
@@ -747,7 +711,6 @@ static inline int hrtimer_reprogram(struct hrtimer *timer,
{
return 0;
}
-static inline void hrtimer_raise_softirq(void) { }
#endif /* CONFIG_HIGH_RES_TIMERS */
@@ -890,10 +853,7 @@ static void __remove_hrtimer(struct hrtimer *timer,
struct hrtimer_clock_base *base,
unsigned long newstate, int reprogram)
{
- /* High res. callback list. NOP for !HIGHRES */
- if (hrtimer_cb_pending(timer))
- hrtimer_remove_cb_pending(timer);
- else {
+ if (timer->state & HRTIMER_STATE_ENQUEUED) {
/*
* Remove the timer from the rbtree and replace the
* first entry pointer if necessary.
@@ -953,7 +913,7 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
{
struct hrtimer_clock_base *base, *new_base;
unsigned long flags;
- int ret, raise;
+ int ret;
base = lock_hrtimer_base(timer, &flags);
@@ -988,26 +948,8 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
enqueue_hrtimer(timer, new_base,
new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
- /*
- * The timer may be expired and moved to the cb_pending
- * list. We can not raise the softirq with base lock held due
- * to a possible deadlock with runqueue lock.
- */
- raise = timer->state == HRTIMER_STATE_PENDING;
-
- /*
- * We use preempt_disable to prevent this task from migrating after
- * setting up the softirq and raising it. Otherwise, if me migrate
- * we will raise the softirq on the wrong CPU.
- */
- preempt_disable();
-
unlock_hrtimer_base(timer, &flags);
- if (raise)
- hrtimer_raise_softirq();
- preempt_enable();
-
return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
@@ -1192,75 +1134,6 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);
-static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
-{
- spin_lock_irq(&cpu_base->lock);
-
- while (!list_empty(&cpu_base->cb_pending)) {
- enum hrtimer_restart (*fn)(struct hrtimer *);
- struct hrtimer *timer;
- int restart;
- int emulate_hardirq_ctx = 0;
-
- timer = list_entry(cpu_base->cb_pending.next,
- struct hrtimer, cb_entry);
-
- debug_hrtimer_deactivate(timer);
- timer_stats_account_hrtimer(timer);
-
- fn = timer->function;
- /*
- * A timer might have been added to the cb_pending list
- * when it was migrated during a cpu-offline operation.
- * Emulate hardirq context for such timers.
- */
- if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
- timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED)
- emulate_hardirq_ctx = 1;
-
- __remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
- spin_unlock_irq(&cpu_base->lock);
-
- if (unlikely(emulate_hardirq_ctx)) {
- local_irq_disable();
- restart = fn(timer);
- local_irq_enable();
- } else
- restart = fn(timer);
-
- spin_lock_irq(&cpu_base->lock);
-
- timer->state &= ~HRTIMER_STATE_CALLBACK;
- if (restart == HRTIMER_RESTART) {
- BUG_ON(hrtimer_active(timer));
- /*
- * Enqueue the timer, allow reprogramming of the event
- * device
- */
- enqueue_hrtimer(timer, timer->base, 1);
- } else if (hrtimer_active(timer)) {
- /*
- * If the timer was rearmed on another CPU, reprogram
- * the event device.
- */
- struct hrtimer_clock_base *base = timer->base;
-
- if (base->first == &timer->node &&
- hrtimer_reprogram(timer, base)) {
- /*
- * Timer is expired. Thus move it from tree to
- * pending list again.
- */
- __remove_hrtimer(timer, base,
- HRTIMER_STATE_PENDING, 0);
- list_add_tail(&timer->cb_entry,
- &base->cpu_base->cb_pending);
- }
- }
- }
- spin_unlock_irq(&cpu_base->lock);
-}
-
static void __run_hrtimer(struct hrtimer *timer)
{
struct hrtimer_clock_base *base = timer->base;
@@ -1268,25 +1141,21 @@ static void __run_hrtimer(struct hrtimer *timer)
enum hrtimer_restart (*fn)(struct hrtimer *);
int restart;
+ WARN_ON(!irqs_disabled());
+
debug_hrtimer_deactivate(timer);
__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
timer_stats_account_hrtimer(timer);
-
fn = timer->function;
- if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
- timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
- /*
- * Used for scheduler timers, avoid lock inversion with
- * rq->lock and tasklist_lock.
- *
- * These timers are required to deal with enqueue expiry
- * themselves and are not allowed to migrate.
- */
- spin_unlock(&cpu_base->lock);
- restart = fn(timer);
- spin_lock(&cpu_base->lock);
- } else
- restart = fn(timer);
+
+ /*
+ * Because we run timers from hardirq context, there is no chance
+ * they get migrated to another cpu, therefore its safe to unlock
+ * the timer base.
+ */
+ spin_unlock(&cpu_base->lock);
+ restart = fn(timer);
+ spin_lock(&cpu_base->lock);
/*
* Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
@@ -1311,7 +1180,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
struct hrtimer_clock_base *base;
ktime_t expires_next, now;
- int i, raise = 0;
+ int i;
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
@@ -1360,16 +1229,6 @@ void hrtimer_interrupt(struct clock_event_device *dev)
break;
}
- /* Move softirq callbacks to the pending list */
- if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
- __remove_hrtimer(timer, base,
- HRTIMER_STATE_PENDING, 0);
- list_add_tail(&timer->cb_entry,
- &base->cpu_base->cb_pending);
- raise = 1;
- continue;
- }
-
__run_hrtimer(timer);
}
spin_unlock(&cpu_base->lock);
@@ -1383,10 +1242,6 @@ void hrtimer_interrupt(struct clock_event_device *dev)
if (tick_program_event(expires_next, 0))
goto retry;
}
-
- /* Raise softirq ? */
- if (raise)
- raise_softirq(HRTIMER_SOFTIRQ);
}
/**
@@ -1413,11 +1268,6 @@ void hrtimer_peek_ahead_timers(void)
local_irq_restore(flags);
}
-static void run_hrtimer_softirq(struct softirq_action *h)
-{
- run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
-}
-
#endif /* CONFIG_HIGH_RES_TIMERS */
/*
@@ -1429,8 +1279,6 @@ static void run_hrtimer_softirq(struct softirq_action *h)
*/
void hrtimer_run_pending(void)
{
- struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-
if (hrtimer_hres_active())
return;
@@ -1444,8 +1292,6 @@ void hrtimer_run_pending(void)
*/
if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
hrtimer_switch_to_hres();
-
- run_hrtimer_pending(cpu_base);
}
/*
@@ -1482,14 +1328,6 @@ void hrtimer_run_queues(void)
hrtimer_get_expires_tv64(timer))
break;
- if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
- __remove_hrtimer(timer, base,
- HRTIMER_STATE_PENDING, 0);
- list_add_tail(&timer->cb_entry,
- &base->cpu_base->cb_pending);
- continue;
- }
-
__run_hrtimer(timer);
}
spin_unlock(&cpu_base->lock);
@@ -1516,9 +1354,6 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
sl->timer.function = hrtimer_wakeup;
sl->task = task;
-#ifdef CONFIG_HIGH_RES_TIMERS
- sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
-#endif
}
static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
@@ -1655,18 +1490,16 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
cpu_base->clock_base[i].cpu_base = cpu_base;
- INIT_LIST_HEAD(&cpu_base->cb_pending);
hrtimer_init_hres(cpu_base);
}
#ifdef CONFIG_HOTPLUG_CPU
-static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
- struct hrtimer_clock_base *new_base, int dcpu)
+static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+ struct hrtimer_clock_base *new_base)
{
struct hrtimer *timer;
struct rb_node *node;
- int raise = 0;
while ((node = rb_first(&old_base->active))) {
timer = rb_entry(node, struct hrtimer, node);
@@ -1674,18 +1507,6 @@ static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
debug_hrtimer_deactivate(timer);
/*
- * Should not happen. Per CPU timers should be
- * canceled _before_ the migration code is called
- */
- if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
- __remove_hrtimer(timer, old_base,
- HRTIMER_STATE_INACTIVE, 0);
- WARN(1, "hrtimer (%p %p)active but cpu %d dead\n",
- timer, timer->function, dcpu);
- continue;
- }
-
- /*
* Mark it as STATE_MIGRATE not INACTIVE otherwise the
* timer could be seen as !active and just vanish away
* under us on another CPU
@@ -1693,69 +1514,34 @@ static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
timer->base = new_base;
/*
- * Enqueue the timer. Allow reprogramming of the event device
+ * Enqueue the timers on the new cpu, but do not reprogram
+ * the timer as that would trigger a deadlock between
+ * hrtimer_enqueue_reprogram() running the timer and us still
+ * holding a nested base lock.
+ *
+ * Instead, we tickle the hrtimer interrupt after the migration
+ * is done, which will run all expired timers and re-program
+ * the timer device.
*/
- enqueue_hrtimer(timer, new_base, 1);
+ enqueue_hrtimer(timer, new_base, 0);
-#ifdef CONFIG_HIGH_RES_TIMERS
- /*
- * Happens with high res enabled when the timer was
- * already expired and the callback mode is
- * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
- * enqueue code does not move them to the soft irq
- * pending list for performance/latency reasons, but
- * in the migration state, we need to do that
- * otherwise we end up with a stale timer.
- */
- if (timer->state == HRTIMER_STATE_MIGRATE) {
- timer->state = HRTIMER_STATE_PENDING;
- list_add_tail(&timer->cb_entry,
- &new_base->cpu_base->cb_pending);
- raise = 1;
- }
-#endif
/* Clear the migration state bit */
timer->state &= ~HRTIMER_STATE_MIGRATE;
}
- return raise;
-}
-
-#ifdef CONFIG_HIGH_RES_TIMERS
-static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
- struct hrtimer_cpu_base *new_base)
-{
- struct hrtimer *timer;
- int raise = 0;
-
- while (!list_empty(&old_base->cb_pending)) {
- timer = list_entry(old_base->cb_pending.next,
- struct hrtimer, cb_entry);
-
- __remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0);
- timer->base = &new_base->clock_base[timer->base->index];
- list_add_tail(&timer->cb_entry, &new_base->cb_pending);
- raise = 1;
- }
- return raise;
-}
-#else
-static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
- struct hrtimer_cpu_base *new_base)
-{
- return 0;
}
-#endif
-static void migrate_hrtimers(int cpu)
+static int migrate_hrtimers(int scpu)
{
struct hrtimer_cpu_base *old_base, *new_base;
- int i, raise = 0;
+ int dcpu, i;
- BUG_ON(cpu_online(cpu));
- old_base = &per_cpu(hrtimer_bases, cpu);
+ BUG_ON(cpu_online(scpu));
+ old_base = &per_cpu(hrtimer_bases, scpu);
new_base = &get_cpu_var(hrtimer_bases);
- tick_cancel_sched_timer(cpu);
+ dcpu = smp_processor_id();
+
+ tick_cancel_sched_timer(scpu);
/*
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
@@ -1764,41 +1550,47 @@ static void migrate_hrtimers(int cpu)
spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
- if (migrate_hrtimer_list(&old_base->clock_base[i],
- &new_base->clock_base[i], cpu))
- raise = 1;
+ migrate_hrtimer_list(&old_base->clock_base[i],
+ &new_base->clock_base[i]);
}
- if (migrate_hrtimer_pending(old_base, new_base))
- raise = 1;
-
spin_unlock(&old_base->lock);
spin_unlock_irq(&new_base->lock);
put_cpu_var(hrtimer_bases);
- if (raise)
- hrtimer_raise_softirq();
+ return dcpu;
+}
+
+static void tickle_timers(void *arg)
+{
+ hrtimer_peek_ahead_timers();
}
+
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
- unsigned int cpu = (long)hcpu;
+ int scpu = (long)hcpu;
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- init_hrtimers_cpu(cpu);
+ init_hrtimers_cpu(scpu);
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
- migrate_hrtimers(cpu);
+ {
+ int dcpu;
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
+ dcpu = migrate_hrtimers(scpu);
+ smp_call_function_single(dcpu, tickle_timers, NULL, 0);
break;
+ }
#endif
default:
@@ -1817,9 +1609,6 @@ void __init hrtimers_init(void)
hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
register_cpu_notifier(&hrtimers_nb);
-#ifdef CONFIG_HIGH_RES_TIMERS
- open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
-#endif
}
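With the callback-pending list and cb_mode gone, every hrtimer callback now runs in hardirq context and callers simply initialise and start the timer. A minimal sketch of the resulting usage (the names are illustrative):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    static struct hrtimer my_timer;

    static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
    {
            /* runs in hardirq context, so keep it short */
            return HRTIMER_NORESTART;
    }

    static void my_timer_setup(void)
    {
            hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            my_timer.function = my_timer_fn;
            /* no cb_mode to choose any more */
            hrtimer_start(&my_timer, ktime_set(0, 100 * NSEC_PER_MSEC),
                          HRTIMER_MODE_REL);
    }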
/**
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index b343deedae91..f63c706d25e1 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -125,6 +125,7 @@ int set_irq_type(unsigned int irq, unsigned int type)
return -ENODEV;
}
+ type &= IRQ_TYPE_SENSE_MASK;
if (type == IRQ_TYPE_NONE)
return 0;
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index f1a23069c20a..6492400cb50d 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -422,11 +422,8 @@ out:
}
#endif
-
-#ifdef CONFIG_TRACE_IRQFLAGS
void early_init_irq_lock_class(void)
{
-#ifndef CONFIG_SPARSE_IRQ
struct irq_desc *desc;
int i;
@@ -436,9 +433,7 @@ void early_init_irq_lock_class(void)
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
}
-#endif
}
-#endif
#ifdef CONFIG_SPARSE_IRQ
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 10ad2f87ed9a..cd0cd8dcb345 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -16,8 +16,15 @@
#include "internals.h"
#ifdef CONFIG_SMP
+cpumask_var_t irq_default_affinity;
-cpumask_t irq_default_affinity = CPU_MASK_ALL;
+static int init_irq_default_affinity(void)
+{
+ alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL);
+ cpumask_setall(irq_default_affinity);
+ return 0;
+}
+core_initcall(init_irq_default_affinity);
/**
* synchronize_irq - wait for pending IRQ handlers (on other CPUs)
@@ -127,7 +134,7 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
desc->status &= ~IRQ_AFFINITY_SET;
}
- cpumask_and(&desc->affinity, cpu_online_mask, &irq_default_affinity);
+ cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
set_affinity:
desc->chip->set_affinity(irq, &desc->affinity);
@@ -368,16 +375,18 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
return 0;
}
- ret = chip->set_type(irq, flags & IRQF_TRIGGER_MASK);
+ /* caller masked out all except trigger mode flags */
+ ret = chip->set_type(irq, flags);
if (ret)
pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
- (int)(flags & IRQF_TRIGGER_MASK),
- irq, chip->set_type);
+ (int)flags, irq, chip->set_type);
else {
+ if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+ flags |= IRQ_LEVEL;
/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
- desc->status &= ~IRQ_TYPE_SENSE_MASK;
- desc->status |= flags & IRQ_TYPE_SENSE_MASK;
+ desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
+ desc->status |= flags;
}
return ret;
@@ -457,7 +466,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
/* Setup the type (level, edge polarity) if configured: */
if (new->flags & IRQF_TRIGGER_MASK) {
- ret = __irq_set_trigger(desc, irq, new->flags);
+ ret = __irq_set_trigger(desc, irq,
+ new->flags & IRQF_TRIGGER_MASK);
if (ret) {
spin_unlock_irqrestore(&desc->lock, flags);
@@ -671,6 +681,18 @@ int request_irq(unsigned int irq, irq_handler_t handler,
struct irq_desc *desc;
int retval;
+ /*
+ * handle_IRQ_event() always ignores IRQF_DISABLED except for
+ * the _first_ irqaction (sigh). That can cause oopsing, but
+ * the behavior is classified as "will not fix" so we need to
+ * start nudging drivers away from using that idiom.
+ */
+ if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
+ == (IRQF_SHARED|IRQF_DISABLED))
+ pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
+ "guaranteed on shared IRQs\n",
+ irq, devname);
+
#ifdef CONFIG_LOCKDEP
/*
* Lockdep wants atomic interrupt handlers:
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 0178e2296990..089c3746358a 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -1,13 +1,8 @@
/*
- * linux/kernel/irq/handle.c
- *
- * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
- * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
- *
- * This file contains the core interrupt handling code.
- *
- * Detailed information is available in Documentation/DocBook/genericirq
+ * NUMA irq-desc migration code
*
+ * Migrate IRQ data structures (irq_desc, chip_data, etc.) over to
+ * the new "home node" of the IRQ.
*/
#include <linux/irq.h>
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index d2c0e5ee53c5..2abd3a7716ed 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -20,7 +20,7 @@ static struct proc_dir_entry *root_irq_dir;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
struct irq_desc *desc = irq_to_desc((long)m->private);
- cpumask_t *mask = &desc->affinity;
+ const struct cpumask *mask = &desc->affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
if (desc->status & IRQ_MOVE_PENDING)
@@ -93,7 +93,7 @@ static const struct file_operations irq_affinity_proc_fops = {
static int default_affinity_show(struct seq_file *m, void *v)
{
- seq_cpumask(m, &irq_default_affinity);
+ seq_cpumask(m, irq_default_affinity);
seq_putc(m, '\n');
return 0;
}
@@ -101,27 +101,37 @@ static int default_affinity_show(struct seq_file *m, void *v)
static ssize_t default_affinity_write(struct file *file,
const char __user *buffer, size_t count, loff_t *ppos)
{
- cpumask_t new_value;
+ cpumask_var_t new_value;
int err;
- err = cpumask_parse_user(buffer, count, &new_value);
+ if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
+ return -ENOMEM;
+
+ err = cpumask_parse_user(buffer, count, new_value);
if (err)
- return err;
+ goto out;
- if (!is_affinity_mask_valid(new_value))
- return -EINVAL;
+ if (!is_affinity_mask_valid(new_value)) {
+ err = -EINVAL;
+ goto out;
+ }
/*
* Do not allow disabling IRQs completely - it's a too easy
* way to make the system unusable accidentally :-) At least
* one online CPU still has to be targeted.
*/
- if (!cpus_intersects(new_value, cpu_online_map))
- return -EINVAL;
+ if (!cpumask_intersects(new_value, cpu_online_mask)) {
+ err = -EINVAL;
+ goto out;
+ }
- irq_default_affinity = new_value;
+ cpumask_copy(irq_default_affinity, new_value);
+ err = count;
- return count;
+out:
+ free_cpumask_var(new_value);
+ return err;
}
static int default_affinity_open(struct inode *inode, struct file *file)
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 7b8b0f21a5b1..e694afa0eb8c 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -30,20 +30,19 @@
#define all_var 0
#endif
-/* These will be re-linked against their real values during the second link stage */
-extern const unsigned long kallsyms_addresses[] __attribute__((weak));
-extern const u8 kallsyms_names[] __attribute__((weak));
+extern const unsigned long kallsyms_addresses[];
+extern const u8 kallsyms_names[];
/* tell the compiler that the count isn't in the small data section if the arch
* has one (eg: FRV)
*/
extern const unsigned long kallsyms_num_syms
-__attribute__((weak, section(".rodata")));
+ __attribute__((__section__(".rodata")));
-extern const u8 kallsyms_token_table[] __attribute__((weak));
-extern const u16 kallsyms_token_index[] __attribute__((weak));
+extern const u8 kallsyms_token_table[];
+extern const u16 kallsyms_token_index[];
-extern const unsigned long kallsyms_markers[] __attribute__((weak));
+extern const unsigned long kallsyms_markers[];
static inline int is_kernel_inittext(unsigned long addr)
{
@@ -168,9 +167,6 @@ static unsigned long get_symbol_pos(unsigned long addr,
unsigned long symbol_start = 0, symbol_end = 0;
unsigned long i, low, high, mid;
- /* This kernel should never had been booted. */
- BUG_ON(!kallsyms_addresses);
-
/* do a binary search on the sorted kallsyms_addresses array */
low = 0;
high = kallsyms_num_syms;
diff --git a/kernel/kexec.c b/kernel/kexec.c
index ac0fde7b54d0..3fb855ad6aa0 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1116,7 +1116,7 @@ void crash_save_cpu(struct pt_regs *regs, int cpu)
struct elf_prstatus prstatus;
u32 *buf;
- if ((cpu < 0) || (cpu >= NR_CPUS))
+ if ((cpu < 0) || (cpu >= nr_cpu_ids))
return;
/* Using ELF notes here is opportunistic.
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 3d3c3ea3a023..b46dbb908669 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -118,10 +118,10 @@ EXPORT_SYMBOL(request_module);
struct subprocess_info {
struct work_struct work;
struct completion *complete;
+ struct cred *cred;
char *path;
char **argv;
char **envp;
- struct key *ring;
enum umh_wait wait;
int retval;
struct file *stdin;
@@ -134,19 +134,20 @@ struct subprocess_info {
static int ____call_usermodehelper(void *data)
{
struct subprocess_info *sub_info = data;
- struct key *new_session, *old_session;
int retval;
- /* Unblock all signals and set the session keyring. */
- new_session = key_get(sub_info->ring);
+ BUG_ON(atomic_read(&sub_info->cred->usage) != 1);
+
+ /* Unblock all signals */
spin_lock_irq(&current->sighand->siglock);
- old_session = __install_session_keyring(current, new_session);
flush_signal_handlers(current, 1);
sigemptyset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- key_put(old_session);
+ /* Install the credentials */
+ commit_creds(sub_info->cred);
+ sub_info->cred = NULL;
/* Install input pipe when needed */
if (sub_info->stdin) {
@@ -185,6 +186,8 @@ void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
if (info->cleanup)
(*info->cleanup)(info->argv, info->envp);
+ if (info->cred)
+ put_cred(info->cred);
kfree(info);
}
EXPORT_SYMBOL(call_usermodehelper_freeinfo);
@@ -240,6 +243,8 @@ static void __call_usermodehelper(struct work_struct *work)
pid_t pid;
enum umh_wait wait = sub_info->wait;
+ BUG_ON(atomic_read(&sub_info->cred->usage) != 1);
+
/* CLONE_VFORK: wait until the usermode helper has execve'd
* successfully We need the data structures to stay around
* until that is done. */
@@ -362,6 +367,9 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
sub_info->path = path;
sub_info->argv = argv;
sub_info->envp = envp;
+ sub_info->cred = prepare_usermodehelper_creds();
+ if (!sub_info->cred)
+ return NULL;
out:
return sub_info;
@@ -376,7 +384,13 @@ EXPORT_SYMBOL(call_usermodehelper_setup);
void call_usermodehelper_setkeys(struct subprocess_info *info,
struct key *session_keyring)
{
- info->ring = session_keyring;
+#ifdef CONFIG_KEYS
+ struct thread_group_cred *tgcred = info->cred->tgcred;
+ key_put(tgcred->session_keyring);
+ tgcred->session_keyring = key_get(session_keyring);
+#else
+ BUG();
+#endif
}
EXPORT_SYMBOL(call_usermodehelper_setkeys);
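Callers that do not need a session keyring can keep using the simple call_usermodehelper() wrapper; the helper path below is a made-up example:

    char *argv[] = { "/sbin/example-helper", "--rescan", NULL };
    char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
    int ret;

    ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);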
@@ -444,6 +458,8 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
DECLARE_COMPLETION_ONSTACK(done);
int retval = 0;
+ BUG_ON(atomic_read(&sub_info->cred->usage) != 1);
+
helper_lock();
if (sub_info->path[0] == '\0')
goto out;
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index c4c7df23f8c7..06b0c3568f0b 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -292,14 +292,12 @@ void lockdep_off(void)
{
current->lockdep_recursion++;
}
-
EXPORT_SYMBOL(lockdep_off);
void lockdep_on(void)
{
current->lockdep_recursion--;
}
-
EXPORT_SYMBOL(lockdep_on);
/*
@@ -581,7 +579,8 @@ static void print_lock_class_header(struct lock_class *class, int depth)
/*
* printk all lock dependencies starting at <entry>:
*/
-static void print_lock_dependencies(struct lock_class *class, int depth)
+static void __used
+print_lock_dependencies(struct lock_class *class, int depth)
{
struct lock_list *entry;
@@ -2513,7 +2512,6 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
if (subclass)
register_lock_class(lock, subclass, 1);
}
-
EXPORT_SYMBOL_GPL(lockdep_init_map);
/*
@@ -2694,8 +2692,9 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
}
static int
-__lock_set_subclass(struct lockdep_map *lock,
- unsigned int subclass, unsigned long ip)
+__lock_set_class(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, unsigned int subclass,
+ unsigned long ip)
{
struct task_struct *curr = current;
struct held_lock *hlock, *prev_hlock;
@@ -2722,6 +2721,7 @@ __lock_set_subclass(struct lockdep_map *lock,
return print_unlock_inbalance_bug(curr, lock, ip);
found_it:
+ lockdep_init_map(lock, name, key, 0);
class = register_lock_class(lock, subclass, 0);
hlock->class_idx = class - lock_classes + 1;
@@ -2906,9 +2906,9 @@ static void check_flags(unsigned long flags)
#endif
}
-void
-lock_set_subclass(struct lockdep_map *lock,
- unsigned int subclass, unsigned long ip)
+void lock_set_class(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, unsigned int subclass,
+ unsigned long ip)
{
unsigned long flags;
@@ -2918,13 +2918,12 @@ lock_set_subclass(struct lockdep_map *lock,
raw_local_irq_save(flags);
current->lockdep_recursion = 1;
check_flags(flags);
- if (__lock_set_subclass(lock, subclass, ip))
+ if (__lock_set_class(lock, name, key, subclass, ip))
check_chain_key(current);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
-
-EXPORT_SYMBOL_GPL(lock_set_subclass);
+EXPORT_SYMBOL_GPL(lock_set_class);
/*
* We are not always called with irqs disabled - do that here,
@@ -2948,7 +2947,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
-
EXPORT_SYMBOL_GPL(lock_acquire);
void lock_release(struct lockdep_map *lock, int nested,
@@ -2966,7 +2964,6 @@ void lock_release(struct lockdep_map *lock, int nested,
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
-
EXPORT_SYMBOL_GPL(lock_release);
#ifdef CONFIG_LOCK_STAT
@@ -3451,7 +3448,6 @@ retry:
if (unlock)
read_unlock(&tasklist_lock);
}
-
EXPORT_SYMBOL_GPL(debug_show_all_locks);
/*
@@ -3472,7 +3468,6 @@ void debug_show_held_locks(struct task_struct *task)
{
__debug_show_held_locks(task);
}
-
EXPORT_SYMBOL_GPL(debug_show_held_locks);
void lockdep_sys_exit(void)
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 1d3ef29a2583..63598dca2d0c 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -80,12 +80,6 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
goto out_pid;
}
- new_nsp->user_ns = copy_user_ns(flags, tsk->nsproxy->user_ns);
- if (IS_ERR(new_nsp->user_ns)) {
- err = PTR_ERR(new_nsp->user_ns);
- goto out_user;
- }
-
new_nsp->net_ns = copy_net_ns(flags, tsk->nsproxy->net_ns);
if (IS_ERR(new_nsp->net_ns)) {
err = PTR_ERR(new_nsp->net_ns);
@@ -95,9 +89,6 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
return new_nsp;
out_net:
- if (new_nsp->user_ns)
- put_user_ns(new_nsp->user_ns);
-out_user:
if (new_nsp->pid_ns)
put_pid_ns(new_nsp->pid_ns);
out_pid:
@@ -130,7 +121,7 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
get_nsproxy(old_ns);
if (!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |
- CLONE_NEWUSER | CLONE_NEWPID | CLONE_NEWNET)))
+ CLONE_NEWPID | CLONE_NEWNET)))
return 0;
if (!capable(CAP_SYS_ADMIN)) {
@@ -173,8 +164,6 @@ void free_nsproxy(struct nsproxy *ns)
put_ipc_ns(ns->ipc_ns);
if (ns->pid_ns)
put_pid_ns(ns->pid_ns);
- if (ns->user_ns)
- put_user_ns(ns->user_ns);
put_net(ns->net_ns);
kmem_cache_free(nsproxy_cachep, ns);
}
@@ -189,7 +178,7 @@ int unshare_nsproxy_namespaces(unsigned long unshare_flags,
int err = 0;
if (!(unshare_flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |
- CLONE_NEWUSER | CLONE_NEWNET)))
+ CLONE_NEWNET)))
return 0;
if (!capable(CAP_SYS_ADMIN))
diff --git a/kernel/panic.c b/kernel/panic.c
index 4d5088355bfe..13f06349a786 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -21,6 +21,7 @@
#include <linux/debug_locks.h>
#include <linux/random.h>
#include <linux/kallsyms.h>
+#include <linux/dmi.h>
int panic_on_oops;
static unsigned long tainted_mask;
@@ -321,36 +322,27 @@ void oops_exit(void)
}
#ifdef WANT_WARN_ON_SLOWPATH
-void warn_on_slowpath(const char *file, int line)
-{
- char function[KSYM_SYMBOL_LEN];
- unsigned long caller = (unsigned long) __builtin_return_address(0);
- sprint_symbol(function, caller);
-
- printk(KERN_WARNING "------------[ cut here ]------------\n");
- printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file,
- line, function);
- print_modules();
- dump_stack();
- print_oops_end_marker();
- add_taint(TAINT_WARN);
-}
-EXPORT_SYMBOL(warn_on_slowpath);
-
-
void warn_slowpath(const char *file, int line, const char *fmt, ...)
{
va_list args;
char function[KSYM_SYMBOL_LEN];
unsigned long caller = (unsigned long)__builtin_return_address(0);
+ const char *board;
+
sprint_symbol(function, caller);
printk(KERN_WARNING "------------[ cut here ]------------\n");
printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file,
line, function);
- va_start(args, fmt);
- vprintk(fmt, args);
- va_end(args);
+ board = dmi_get_system_info(DMI_PRODUCT_NAME);
+ if (board)
+ printk(KERN_WARNING "Hardware name: %s\n", board);
+
+ if (fmt) {
+ va_start(args, fmt);
+ vprintk(fmt, args);
+ va_end(args);
+ }
print_modules();
dump_stack();
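The format-string variant is what the WARN() macro feeds into, so a warning from a driver now also reports the DMI product name. A typical, hypothetical call site:

    if (WARN(ret < 0, "example_driver: frobnication failed (%d)\n", ret))
            return ret;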
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 5e79c662294b..887c63787de6 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -116,7 +116,7 @@ static DEFINE_SPINLOCK(idr_lock);
* must supply functions here, even if the function just returns
* ENOSYS. The standard POSIX timer management code assumes the
* following: 1.) The k_itimer struct (sched.h) is used for the
- * timer. 2.) The list, it_lock, it_clock, it_id and it_process
+ * timer. 2.) The list, it_lock, it_clock, it_id and it_pid
* fields are not modified by timer code.
*
* At this time all functions EXCEPT clock_nanosleep can be
@@ -197,6 +197,11 @@ static int common_timer_create(struct k_itimer *new_timer)
return 0;
}
+static int no_timer_create(struct k_itimer *new_timer)
+{
+ return -EOPNOTSUPP;
+}
+
/*
* Return nonzero if we know a priori this clockid_t value is bogus.
*/
@@ -248,6 +253,7 @@ static __init int init_posix_timers(void)
.clock_getres = hrtimer_get_res,
.clock_get = posix_get_monotonic_raw,
.clock_set = do_posix_clock_nosettime,
+ .timer_create = no_timer_create,
};
register_posix_clock(CLOCK_REALTIME, &clock_realtime);
@@ -313,7 +319,8 @@ void do_schedule_next_timer(struct siginfo *info)
int posix_timer_event(struct k_itimer *timr, int si_private)
{
- int shared, ret;
+ struct task_struct *task;
+ int shared, ret = -1;
/*
* FIXME: if ->sigq is queued we can race with
* dequeue_signal()->do_schedule_next_timer().
@@ -327,8 +334,13 @@ int posix_timer_event(struct k_itimer *timr, int si_private)
*/
timr->sigq->info.si_sys_private = si_private;
- shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
- ret = send_sigqueue(timr->sigq, timr->it_process, shared);
+ rcu_read_lock();
+ task = pid_task(timr->it_pid, PIDTYPE_PID);
+ if (task) {
+ shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
+ ret = send_sigqueue(timr->sigq, task, shared);
+ }
+ rcu_read_unlock();
/* If we failed to send the signal the timer stops. */
return ret > 0;
}
@@ -405,7 +417,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
return ret;
}
-static struct task_struct * good_sigevent(sigevent_t * event)
+static struct pid *good_sigevent(sigevent_t * event)
{
struct task_struct *rtn = current->group_leader;
@@ -419,7 +431,7 @@ static struct task_struct * good_sigevent(sigevent_t * event)
((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
return NULL;
- return rtn;
+ return task_pid(rtn);
}
void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
@@ -458,6 +470,7 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
idr_remove(&posix_timers_id, tmr->it_id);
spin_unlock_irqrestore(&idr_lock, flags);
}
+ put_pid(tmr->it_pid);
sigqueue_free(tmr->sigq);
kmem_cache_free(posix_timers_cache, tmr);
}
@@ -471,7 +484,6 @@ sys_timer_create(const clockid_t which_clock,
{
struct k_itimer *new_timer;
int error, new_timer_id;
- struct task_struct *process;
sigevent_t event;
int it_id_set = IT_ID_NOT_SET;
@@ -525,11 +537,9 @@ sys_timer_create(const clockid_t which_clock,
goto out;
}
rcu_read_lock();
- process = good_sigevent(&event);
- if (process)
- get_task_struct(process);
+ new_timer->it_pid = get_pid(good_sigevent(&event));
rcu_read_unlock();
- if (!process) {
+ if (!new_timer->it_pid) {
error = -EINVAL;
goto out;
}
@@ -537,8 +547,7 @@ sys_timer_create(const clockid_t which_clock,
event.sigev_notify = SIGEV_SIGNAL;
event.sigev_signo = SIGALRM;
event.sigev_value.sival_int = new_timer->it_id;
- process = current->group_leader;
- get_task_struct(process);
+ new_timer->it_pid = get_pid(task_tgid(current));
}
new_timer->it_sigev_notify = event.sigev_notify;
@@ -548,7 +557,7 @@ sys_timer_create(const clockid_t which_clock,
new_timer->sigq->info.si_code = SI_TIMER;
spin_lock_irq(&current->sighand->siglock);
- new_timer->it_process = process;
+ new_timer->it_signal = current->signal;
list_add(&new_timer->list, &current->signal->posix_timers);
spin_unlock_irq(&current->sighand->siglock);
@@ -583,8 +592,7 @@ static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
timr = idr_find(&posix_timers_id, (int)timer_id);
if (timr) {
spin_lock(&timr->it_lock);
- if (timr->it_process &&
- same_thread_group(timr->it_process, current)) {
+ if (timr->it_signal == current->signal) {
spin_unlock(&idr_lock);
return timr;
}
@@ -831,8 +839,7 @@ retry_delete:
* This keeps any tasks waiting on the spin lock from thinking
* they got something (see the lock code above).
*/
- put_task_struct(timer->it_process);
- timer->it_process = NULL;
+ timer->it_signal = NULL;
unlock_timer(timer, flags);
release_posix_timer(timer, IT_ID_SET);
@@ -858,8 +865,7 @@ retry_delete:
* This keeps any tasks waiting on the spin lock from thinking
* they got something (see the lock code above).
*/
- put_task_struct(timer->it_process);
- timer->it_process = NULL;
+ timer->it_signal = NULL;
unlock_timer(timer, flags);
release_posix_timer(timer, IT_ID_SET);
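The switch from a task_struct pointer to a struct pid reference follows the usual pattern: take the reference up front, and translate it back to a task only inside an RCU read-side section, where the task may legitimately have gone away. Roughly:

    struct pid *pid = get_pid(task_tgid(current));
    struct task_struct *task;

    /* ... later, possibly from a different context ... */
    rcu_read_lock();
    task = pid_task(pid, PIDTYPE_PID);      /* NULL if the group leader exited */
    if (task)
            send_sig(SIGALRM, task, 0);
    rcu_read_unlock();

    put_pid(pid);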
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index 72016f051477..97890831e1b5 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -27,7 +27,7 @@ static DECLARE_WORK(poweroff_work, do_poweroff);
static void handle_poweroff(int key, struct tty_struct *tty)
{
/* run sysrq poweroff on boot cpu */
- schedule_work_on(first_cpu(cpu_online_map), &poweroff_work);
+ schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);
}
static struct sysrq_key_op sysrq_poweroff_op = {
diff --git a/kernel/printk.c b/kernel/printk.c
index f492f1583d77..e651ab05655f 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -662,7 +662,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
if (recursion_bug) {
recursion_bug = 0;
strcpy(printk_buf, recursion_bug_msg);
- printed_len = sizeof(recursion_bug_msg);
+ printed_len = strlen(recursion_bug_msg);
}
/* Emit the output into the temporary buffer */
printed_len += vscnprintf(printk_buf + printed_len,
diff --git a/kernel/profile.c b/kernel/profile.c
index 4cb7d68fed82..d18e2d2654f2 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -45,7 +45,7 @@ static unsigned long prof_len, prof_shift;
int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);
-static cpumask_t prof_cpu_mask = CPU_MASK_ALL;
+static cpumask_var_t prof_cpu_mask;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
@@ -113,9 +113,13 @@ int __ref profile_init(void)
buffer_bytes = prof_len*sizeof(atomic_t);
if (!slab_is_available()) {
prof_buffer = alloc_bootmem(buffer_bytes);
+ alloc_bootmem_cpumask_var(&prof_cpu_mask);
return 0;
}
+ if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
+ return -ENOMEM;
+
prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL);
if (prof_buffer)
return 0;
@@ -128,6 +132,7 @@ int __ref profile_init(void)
if (prof_buffer)
return 0;
+ free_cpumask_var(prof_cpu_mask);
return -ENOMEM;
}
@@ -386,13 +391,15 @@ out_free:
return NOTIFY_BAD;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- cpu_set(cpu, prof_cpu_mask);
+ if (prof_cpu_mask != NULL)
+ cpumask_set_cpu(cpu, prof_cpu_mask);
break;
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- cpu_clear(cpu, prof_cpu_mask);
+ if (prof_cpu_mask != NULL)
+ cpumask_clear_cpu(cpu, prof_cpu_mask);
if (per_cpu(cpu_profile_hits, cpu)[0]) {
page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
per_cpu(cpu_profile_hits, cpu)[0] = NULL;
@@ -430,7 +437,8 @@ void profile_tick(int type)
if (type == CPU_PROFILING && timer_hook)
timer_hook(regs);
- if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask))
+ if (!user_mode(regs) && prof_cpu_mask != NULL &&
+ cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
profile_hit(type, (void *)profile_pc(regs));
}
@@ -442,7 +450,7 @@ void profile_tick(int type)
static int prof_cpu_mask_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
- int len = cpumask_scnprintf(page, count, (cpumask_t *)data);
+ int len = cpumask_scnprintf(page, count, data);
if (count - len < 2)
return -EINVAL;
len += sprintf(page + len, "\n");
@@ -452,16 +460,20 @@ static int prof_cpu_mask_read_proc(char *page, char **start, off_t off,
static int prof_cpu_mask_write_proc(struct file *file,
const char __user *buffer, unsigned long count, void *data)
{
- cpumask_t *mask = (cpumask_t *)data;
+ struct cpumask *mask = data;
unsigned long full_count = count, err;
- cpumask_t new_value;
+ cpumask_var_t new_value;
- err = cpumask_parse_user(buffer, count, &new_value);
- if (err)
- return err;
+ if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
+ return -ENOMEM;
- *mask = new_value;
- return full_count;
+ err = cpumask_parse_user(buffer, count, new_value);
+ if (!err) {
+ cpumask_copy(mask, new_value);
+ err = full_count;
+ }
+ free_cpumask_var(new_value);
+ return err;
}
void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
@@ -472,7 +484,7 @@ void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
if (!entry)
return;
- entry->data = (void *)&prof_cpu_mask;
+ entry->data = prof_cpu_mask;
entry->read_proc = prof_cpu_mask_read_proc;
entry->write_proc = prof_cpu_mask_write_proc;
}
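
The profile.c conversion shows the cpumask_var_t pattern that recurs throughout this merge: the statically sized cpumask_t (NR_CPUS bits, potentially large) is replaced by cpumask_var_t, which must be allocated with alloc_cpumask_var()/alloc_bootmem_cpumask_var(), checked for NULL before use, and released with free_cpumask_var(); with CONFIG_CPUMASK_OFFSTACK this is a real allocation, otherwise it degenerates to an ordinary array and the alloc/free calls are effectively no-ops. Below is a condensed kernel-style sketch of the /proc write-handler pattern introduced above; the helper name is hypothetical, it reuses only APIs visible in the hunk, and it is not a standalone program.

/* Parse a user-supplied mask into a temporary, publish it only on success. */
static int update_mask_from_user(struct cpumask *dst,
				 const char __user *buffer, unsigned long count)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (!err)
		cpumask_copy(dst, new_value);

	free_cpumask_var(new_value);
	return err ? err : (int)count;
}
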
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 4c8bcd7dd8e0..29dc700e198c 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -25,6 +25,17 @@
#include <asm/pgtable.h>
#include <asm/uaccess.h>
+
+/*
+ * Initialize a new task whose father had been ptraced.
+ *
+ * Called from copy_process().
+ */
+void ptrace_fork(struct task_struct *child, unsigned long clone_flags)
+{
+ arch_ptrace_fork(child, clone_flags);
+}
+
/*
* ptrace a task: make the debugger its new parent and
* move it to the ptrace list.
@@ -72,6 +83,7 @@ void __ptrace_unlink(struct task_struct *child)
child->parent = child->real_parent;
list_del_init(&child->ptrace_entry);
+ arch_ptrace_untrace(child);
if (task_is_traced(child))
ptrace_untrace(child);
}
@@ -115,6 +127,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
+ const struct cred *cred = current_cred(), *tcred;
+
/* May we inspect the given task?
* This check is used both for attaching with ptrace
* and for allowing access to sensitive information in /proc.
@@ -127,13 +141,19 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
/* Don't let security modules deny introspection */
if (task == current)
return 0;
- if (((current->uid != task->euid) ||
- (current->uid != task->suid) ||
- (current->uid != task->uid) ||
- (current->gid != task->egid) ||
- (current->gid != task->sgid) ||
- (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
+ rcu_read_lock();
+ tcred = __task_cred(task);
+ if ((cred->uid != tcred->euid ||
+ cred->uid != tcred->suid ||
+ cred->uid != tcred->uid ||
+ cred->gid != tcred->egid ||
+ cred->gid != tcred->sgid ||
+ cred->gid != tcred->gid) &&
+ !capable(CAP_SYS_PTRACE)) {
+ rcu_read_unlock();
return -EPERM;
+ }
+ rcu_read_unlock();
smp_rmb();
if (task->mm)
dumpable = get_dumpable(task->mm);
@@ -163,6 +183,14 @@ int ptrace_attach(struct task_struct *task)
if (same_thread_group(task, current))
goto out;
+ /* Protect exec's credential calculations against our interference;
+ * SUID, SGID and LSM creds get determined differently under ptrace.
+ */
+ retval = mutex_lock_interruptible(&current->cred_exec_mutex);
+ if (retval < 0)
+ goto out;
+
+ retval = -EPERM;
repeat:
/*
* Nasty, nasty.
@@ -202,6 +230,7 @@ repeat:
bad:
write_unlock_irqrestore(&tasklist_lock, flags);
task_unlock(task);
+ mutex_unlock(&current->cred_exec_mutex);
out:
return retval;
}
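
The ptrace hunks above switch the permission check to the new credentials API: the caller's struct cred (current_cred()) is compared against the target's __task_cred() under rcu_read_lock(), and ptrace_attach() now takes cred_exec_mutex so an attach cannot race with execve()'s credential calculations. The user-visible policy is essentially unchanged: PTRACE_ATTACH still fails with -EPERM unless the caller's uid/gid match the target's real/effective/saved ids or the caller has CAP_SYS_PTRACE. A small user-space probe (hypothetical, not part of the patch) that shows both outcomes:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(int argc, char **argv)
{
	pid_t pid;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return 1;
	}
	pid = (pid_t)atoi(argv[1]);

	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
		/* EPERM here is the credential check in __ptrace_may_access(). */
		fprintf(stderr, "attach to %d failed: %s\n", pid, strerror(errno));
		return 1;
	}
	waitpid(pid, NULL, 0);			/* target stops with SIGSTOP */
	printf("attached to %d, detaching\n", pid);
	ptrace(PTRACE_DETACH, pid, NULL, 0);
	return 0;
}
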
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index c03ca3e61919..6ec495f60ead 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -63,14 +63,14 @@ static struct rcu_ctrlblk rcu_ctrlblk = {
.completed = -300,
.pending = -300,
.lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
- .cpumask = CPU_MASK_NONE,
+ .cpumask = CPU_BITS_NONE,
};
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
.cur = -300,
.completed = -300,
.pending = -300,
.lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
- .cpumask = CPU_MASK_NONE,
+ .cpumask = CPU_BITS_NONE,
};
DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
@@ -85,7 +85,6 @@ static void force_quiescent_state(struct rcu_data *rdp,
struct rcu_ctrlblk *rcp)
{
int cpu;
- cpumask_t cpumask;
unsigned long flags;
set_need_resched();
@@ -96,10 +95,10 @@ static void force_quiescent_state(struct rcu_data *rdp,
* Don't send IPI to itself. With irqs disabled,
* rdp->cpu is the current cpu.
*
- * cpu_online_map is updated by the _cpu_down()
+ * cpu_online_mask is updated by the _cpu_down()
* using __stop_machine(). Since we're in irqs disabled
 * section, __stop_machine() is not executing, hence
- * the cpu_online_map is stable.
+ * the cpu_online_mask is stable.
*
* However, a cpu might have been offlined _just_ before
* we disabled irqs while entering here.
@@ -107,13 +106,14 @@ static void force_quiescent_state(struct rcu_data *rdp,
* notification, leading to the offlined cpu's bit
* being set in the rcp->cpumask.
*
- * Hence cpumask = (rcp->cpumask & cpu_online_map) to prevent
+ * Hence cpumask = (rcp->cpumask & cpu_online_mask) to prevent
* sending smp_reschedule() to an offlined CPU.
*/
- cpus_and(cpumask, rcp->cpumask, cpu_online_map);
- cpu_clear(rdp->cpu, cpumask);
- for_each_cpu_mask_nr(cpu, cpumask)
- smp_send_reschedule(cpu);
+ for_each_cpu_and(cpu,
+ to_cpumask(rcp->cpumask), cpu_online_mask) {
+ if (cpu != rdp->cpu)
+ smp_send_reschedule(cpu);
+ }
}
spin_unlock_irqrestore(&rcp->lock, flags);
}
@@ -193,7 +193,7 @@ static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
printk(KERN_ERR "INFO: RCU detected CPU stalls:");
for_each_possible_cpu(cpu) {
- if (cpu_isset(cpu, rcp->cpumask))
+ if (cpumask_test_cpu(cpu, to_cpumask(rcp->cpumask)))
printk(" %d", cpu);
}
printk(" (detected by %d, t=%ld jiffies)\n",
@@ -221,7 +221,8 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
long delta;
delta = jiffies - rcp->jiffies_stall;
- if (cpu_isset(smp_processor_id(), rcp->cpumask) && delta >= 0) {
+ if (cpumask_test_cpu(smp_processor_id(), to_cpumask(rcp->cpumask)) &&
+ delta >= 0) {
/* We haven't checked in, so go dump stack. */
print_cpu_stall(rcp);
@@ -406,8 +407,8 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp)
*/
static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
{
- cpu_clear(cpu, rcp->cpumask);
- if (cpus_empty(rcp->cpumask)) {
+ cpumask_clear_cpu(cpu, to_cpumask(rcp->cpumask));
+ if (cpumask_empty(to_cpumask(rcp->cpumask))) {
/* batch completed ! */
rcp->completed = rcp->cur;
rcu_start_batch(rcp);
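
The rcuclassic change above removes the temporary cpumask entirely: instead of computing cpumask = rcp->cpumask & cpu_online_map on the stack and iterating it, force_quiescent_state() walks the intersection in place with for_each_cpu_and() and skips rdp->cpu inside the loop, which matters once NR_CPUS is large enough that a cpumask no longer belongs on the stack. The same idea in plain C, with hypothetical 64-bit masks standing in for the kernel bitmaps:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pending = 0xf0f0;	/* CPUs still owing a quiescent state */
	uint64_t online  = 0x00ff;	/* currently online CPUs */
	int self = 4;			/* the CPU running this code */

	/* Equivalent of for_each_cpu_and(cpu, pending, online). */
	for (int cpu = 0; cpu < 64; cpu++) {
		if (!((pending >> cpu) & 1) || !((online >> cpu) & 1))
			continue;
		if (cpu == self)
			continue;	/* don't IPI ourselves */
		printf("would send reschedule IPI to CPU %d\n", cpu);
	}
	return 0;
}
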
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 59236e8b9daa..f9dc8f3720f6 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -164,7 +164,8 @@ static char *rcu_try_flip_state_names[] =
{ "idle", "waitack", "waitzero", "waitmb" };
#endif /* #ifdef CONFIG_RCU_TRACE */
-static cpumask_t rcu_cpu_online_map __read_mostly = CPU_MASK_NONE;
+static DECLARE_BITMAP(rcu_cpu_online_map, NR_CPUS) __read_mostly
+ = CPU_BITS_NONE;
/*
* Enum and per-CPU flag to determine when each CPU has seen
@@ -551,6 +552,16 @@ void rcu_irq_exit(void)
}
}
+void rcu_nmi_enter(void)
+{
+ rcu_irq_enter();
+}
+
+void rcu_nmi_exit(void)
+{
+ rcu_irq_exit();
+}
+
static void dyntick_save_progress_counter(int cpu)
{
struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
@@ -748,7 +759,7 @@ rcu_try_flip_idle(void)
/* Now ask each CPU for acknowledgement of the flip. */
- for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
+ for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) {
per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
dyntick_save_progress_counter(cpu);
}
@@ -766,7 +777,7 @@ rcu_try_flip_waitack(void)
int cpu;
RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
- for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
+ for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
if (rcu_try_flip_waitack_needed(cpu) &&
per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
@@ -798,7 +809,7 @@ rcu_try_flip_waitzero(void)
/* Check to see if the sum of the "last" counters is zero. */
RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
- for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
+ for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
if (sum != 0) {
RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
@@ -813,7 +824,7 @@ rcu_try_flip_waitzero(void)
smp_mb(); /* ^^^^^^^^^^^^ */
/* Call for a memory barrier from each CPU. */
- for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
+ for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) {
per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
dyntick_save_progress_counter(cpu);
}
@@ -833,7 +844,7 @@ rcu_try_flip_waitmb(void)
int cpu;
RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
- for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
+ for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
if (rcu_try_flip_waitmb_needed(cpu) &&
per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
@@ -1022,7 +1033,7 @@ void rcu_offline_cpu(int cpu)
RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0;
RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0;
- cpu_clear(cpu, rcu_cpu_online_map);
+ cpumask_clear_cpu(cpu, to_cpumask(rcu_cpu_online_map));
spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
@@ -1062,7 +1073,7 @@ void __cpuinit rcu_online_cpu(int cpu)
struct rcu_data *rdp;
spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
- cpu_set(cpu, rcu_cpu_online_map);
+ cpumask_set_cpu(cpu, to_cpumask(rcu_cpu_online_map));
spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
/*
@@ -1420,7 +1431,7 @@ void __init __rcu_init(void)
* We don't need protection against CPU-Hotplug here
* since
* a) If a CPU comes online while we are iterating over the
- * cpu_online_map below, we would only end up making a
+ * cpu_online_mask below, we would only end up making a
* duplicate call to rcu_online_cpu() which sets the corresponding
* CPU's mask in the rcu_cpu_online_map.
*
diff --git a/kernel/rcupreempt_trace.c b/kernel/rcupreempt_trace.c
index 35c2d3360ecf..7c2665cac172 100644
--- a/kernel/rcupreempt_trace.c
+++ b/kernel/rcupreempt_trace.c
@@ -149,12 +149,12 @@ static void rcupreempt_trace_sum(struct rcupreempt_trace *sp)
sp->done_length += cp->done_length;
sp->done_add += cp->done_add;
sp->done_remove += cp->done_remove;
- atomic_set(&sp->done_invoked, atomic_read(&cp->done_invoked));
+ atomic_add(atomic_read(&cp->done_invoked), &sp->done_invoked);
sp->rcu_check_callbacks += cp->rcu_check_callbacks;
- atomic_set(&sp->rcu_try_flip_1,
- atomic_read(&cp->rcu_try_flip_1));
- atomic_set(&sp->rcu_try_flip_e1,
- atomic_read(&cp->rcu_try_flip_e1));
+ atomic_add(atomic_read(&cp->rcu_try_flip_1),
+ &sp->rcu_try_flip_1);
+ atomic_add(atomic_read(&cp->rcu_try_flip_e1),
+ &sp->rcu_try_flip_e1);
sp->rcu_try_flip_i1 += cp->rcu_try_flip_i1;
sp->rcu_try_flip_ie1 += cp->rcu_try_flip_ie1;
sp->rcu_try_flip_g1 += cp->rcu_try_flip_g1;
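
The rcupreempt_trace fix above corrects how per-CPU trace counters are folded into the summary: atomic_set() overwrites the accumulator on every iteration, so only the last CPU's count survived, whereas atomic_add() accumulates. A user-space reproduction with C11 atomics (the counter values are made up):

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_int per_cpu[4] = { 3, 5, 7, 11 };
	atomic_int wrong = 0, right = 0;

	for (int cpu = 0; cpu < 4; cpu++) {
		/* Buggy pattern: "set" keeps only the last CPU's value. */
		atomic_store(&wrong, atomic_load(&per_cpu[cpu]));
		/* Fixed pattern: accumulate across CPUs. */
		atomic_fetch_add(&right, atomic_load(&per_cpu[cpu]));
	}
	printf("set-based sum = %d, add-based sum = %d\n",
	       atomic_load(&wrong), atomic_load(&right));	/* prints 11 and 26 */
	return 0;
}
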
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 85cb90588a55..3245b40952c6 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -39,6 +39,7 @@
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
+#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
@@ -108,7 +109,6 @@ struct rcu_torture {
int rtort_mbtest;
};
-static int fullstop = 0; /* stop generating callbacks at test end. */
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture *rcu_torture_current = NULL;
static long rcu_torture_current_version = 0;
@@ -136,6 +136,30 @@ static int stutter_pause_test = 0;
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
+#define FULLSTOP_SIGNALED 1 /* Bail due to signal. */
+#define FULLSTOP_CLEANUP 2 /* Orderly shutdown. */
+static int fullstop; /* stop generating callbacks at test end. */
+DEFINE_MUTEX(fullstop_mutex); /* protect fullstop transitions and */
+ /* spawning of kthreads. */
+
+/*
+ * Detect and respond to a signal-based shutdown.
+ */
+static int
+rcutorture_shutdown_notify(struct notifier_block *unused1,
+ unsigned long unused2, void *unused3)
+{
+ if (fullstop)
+ return NOTIFY_DONE;
+ if (signal_pending(current)) {
+ mutex_lock(&fullstop_mutex);
+ if (!ACCESS_ONCE(fullstop))
+ fullstop = FULLSTOP_SIGNALED;
+ mutex_unlock(&fullstop_mutex);
+ }
+ return NOTIFY_DONE;
+}
+
/*
* Allocate an element from the rcu_tortures pool.
*/
@@ -199,11 +223,12 @@ rcu_random(struct rcu_random_state *rrsp)
static void
rcu_stutter_wait(void)
{
- while (stutter_pause_test || !rcutorture_runnable)
+ while ((stutter_pause_test || !rcutorture_runnable) && !fullstop) {
if (rcutorture_runnable)
schedule_timeout_interruptible(1);
else
schedule_timeout_interruptible(round_jiffies_relative(HZ));
+ }
}
/*
@@ -599,7 +624,7 @@ rcu_torture_writer(void *arg)
rcu_stutter_wait();
} while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
- while (!kthread_should_stop())
+ while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
schedule_timeout_uninterruptible(1);
return 0;
}
@@ -624,7 +649,7 @@ rcu_torture_fakewriter(void *arg)
} while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
- while (!kthread_should_stop())
+ while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
schedule_timeout_uninterruptible(1);
return 0;
}
@@ -734,7 +759,7 @@ rcu_torture_reader(void *arg)
VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
if (irqreader && cur_ops->irqcapable)
del_timer_sync(&t);
- while (!kthread_should_stop())
+ while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
schedule_timeout_uninterruptible(1);
return 0;
}
@@ -831,7 +856,7 @@ rcu_torture_stats(void *arg)
do {
schedule_timeout_interruptible(stat_interval * HZ);
rcu_torture_stats_print();
- } while (!kthread_should_stop());
+ } while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
return 0;
}
@@ -843,49 +868,52 @@ static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
*/
static void rcu_torture_shuffle_tasks(void)
{
- cpumask_t tmp_mask;
+ cpumask_var_t tmp_mask;
int i;
- cpus_setall(tmp_mask);
+ if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
+ BUG();
+
+ cpumask_setall(tmp_mask);
get_online_cpus();
/* No point in shuffling if there is only one online CPU (ex: UP) */
- if (num_online_cpus() == 1) {
- put_online_cpus();
- return;
- }
+ if (num_online_cpus() == 1)
+ goto out;
if (rcu_idle_cpu != -1)
- cpu_clear(rcu_idle_cpu, tmp_mask);
+ cpumask_clear_cpu(rcu_idle_cpu, tmp_mask);
- set_cpus_allowed_ptr(current, &tmp_mask);
+ set_cpus_allowed_ptr(current, tmp_mask);
if (reader_tasks) {
for (i = 0; i < nrealreaders; i++)
if (reader_tasks[i])
set_cpus_allowed_ptr(reader_tasks[i],
- &tmp_mask);
+ tmp_mask);
}
if (fakewriter_tasks) {
for (i = 0; i < nfakewriters; i++)
if (fakewriter_tasks[i])
set_cpus_allowed_ptr(fakewriter_tasks[i],
- &tmp_mask);
+ tmp_mask);
}
if (writer_task)
- set_cpus_allowed_ptr(writer_task, &tmp_mask);
+ set_cpus_allowed_ptr(writer_task, tmp_mask);
if (stats_task)
- set_cpus_allowed_ptr(stats_task, &tmp_mask);
+ set_cpus_allowed_ptr(stats_task, tmp_mask);
if (rcu_idle_cpu == -1)
rcu_idle_cpu = num_online_cpus() - 1;
else
rcu_idle_cpu--;
+out:
put_online_cpus();
+ free_cpumask_var(tmp_mask);
}
/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
@@ -899,7 +927,7 @@ rcu_torture_shuffle(void *arg)
do {
schedule_timeout_interruptible(shuffle_interval * HZ);
rcu_torture_shuffle_tasks();
- } while (!kthread_should_stop());
+ } while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
return 0;
}
@@ -914,10 +942,10 @@ rcu_torture_stutter(void *arg)
do {
schedule_timeout_interruptible(stutter * HZ);
stutter_pause_test = 1;
- if (!kthread_should_stop())
+ if (!kthread_should_stop() && !fullstop)
schedule_timeout_interruptible(stutter * HZ);
stutter_pause_test = 0;
- } while (!kthread_should_stop());
+ } while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
return 0;
}
@@ -934,12 +962,27 @@ rcu_torture_print_module_parms(char *tag)
stutter, irqreader);
}
+static struct notifier_block rcutorture_nb = {
+ .notifier_call = rcutorture_shutdown_notify,
+};
+
static void
rcu_torture_cleanup(void)
{
int i;
- fullstop = 1;
+ mutex_lock(&fullstop_mutex);
+ if (!fullstop) {
+ /* If being signaled, let it happen, then exit. */
+ mutex_unlock(&fullstop_mutex);
+ schedule_timeout_interruptible(10 * HZ);
+ if (cur_ops->cb_barrier != NULL)
+ cur_ops->cb_barrier();
+ return;
+ }
+ fullstop = FULLSTOP_CLEANUP;
+ mutex_unlock(&fullstop_mutex);
+ unregister_reboot_notifier(&rcutorture_nb);
if (stutter_task) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
kthread_stop(stutter_task);
@@ -1015,6 +1058,8 @@ rcu_torture_init(void)
{ &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
&srcu_ops, &sched_ops, &sched_ops_sync, };
+ mutex_lock(&fullstop_mutex);
+
/* Process args and tell the world that the torturer is on the job. */
for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
cur_ops = torture_ops[i];
@@ -1024,6 +1069,7 @@ rcu_torture_init(void)
if (i == ARRAY_SIZE(torture_ops)) {
printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
torture_type);
+ mutex_unlock(&fullstop_mutex);
return (-EINVAL);
}
if (cur_ops->init)
@@ -1146,9 +1192,12 @@ rcu_torture_init(void)
goto unwind;
}
}
+ register_reboot_notifier(&rcutorture_nb);
+ mutex_unlock(&fullstop_mutex);
return 0;
unwind:
+ mutex_unlock(&fullstop_mutex);
rcu_torture_cleanup();
return firsterr;
}
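
The rcutorture changes above teach the torture kthreads to shut down cleanly in two distinct situations: an orderly module unload (FULLSTOP_CLEANUP) and a signal- or reboot-driven stop (FULLSTOP_SIGNALED), with fullstop_mutex serializing the flag transitions and kthread spawning, and a reboot notifier catching the shutdown case. A stripped-down user-space analogue of that coordination (pthread-based, all names hypothetical), built with cc -pthread:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define FULLSTOP_SIGNALED 1
#define FULLSTOP_CLEANUP  2

static atomic_int fullstop;		/* 0 while the test is running */
static pthread_mutex_t fullstop_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *worker(void *unused)
{
	(void)unused;
	while (!atomic_load(&fullstop))
		usleep(1000);		/* stand-in for torture work */
	return NULL;
}

static void request_stop(int why)
{
	pthread_mutex_lock(&fullstop_mutex);	/* serialize fullstop transitions */
	if (!atomic_load(&fullstop))		/* first stop reason wins */
		atomic_store(&fullstop, why);
	pthread_mutex_unlock(&fullstop_mutex);
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);
	usleep(10000);
	request_stop(FULLSTOP_CLEANUP);		/* orderly-shutdown path */
	pthread_join(tid, NULL);
	printf("worker stopped, reason %d\n", atomic_load(&fullstop));
	return 0;
}
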
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
new file mode 100644
index 000000000000..a342b032112c
--- /dev/null
+++ b/kernel/rcutree.c
@@ -0,0 +1,1535 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2008
+ *
+ * Authors: Dipankar Sarma <dipankar@in.ibm.com>
+ * Manfred Spraul <manfred@colorfullife.com>
+ * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
+ *
+ * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * Documentation/RCU
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/mutex.h>
+#include <linux/time.h>
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key rcu_lock_key;
+struct lockdep_map rcu_lock_map =
+ STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
+EXPORT_SYMBOL_GPL(rcu_lock_map);
+#endif
+
+/* Data structures. */
+
+#define RCU_STATE_INITIALIZER(name) { \
+ .level = { &name.node[0] }, \
+ .levelcnt = { \
+ NUM_RCU_LVL_0, /* root of hierarchy. */ \
+ NUM_RCU_LVL_1, \
+ NUM_RCU_LVL_2, \
+ NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \
+ }, \
+ .signaled = RCU_SIGNAL_INIT, \
+ .gpnum = -300, \
+ .completed = -300, \
+ .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
+ .fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \
+ .n_force_qs = 0, \
+ .n_force_qs_ngp = 0, \
+}
+
+struct rcu_state rcu_state = RCU_STATE_INITIALIZER(rcu_state);
+DEFINE_PER_CPU(struct rcu_data, rcu_data);
+
+struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
+DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
+
+#ifdef CONFIG_NO_HZ
+DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks);
+#endif /* #ifdef CONFIG_NO_HZ */
+
+static int blimit = 10; /* Maximum callbacks per softirq. */
+static int qhimark = 10000; /* If this many pending, ignore blimit. */
+static int qlowmark = 100; /* Once only this many pending, use blimit. */
+
+static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
+
+/*
+ * Return the number of RCU batches processed thus far for debug & stats.
+ */
+long rcu_batches_completed(void)
+{
+ return rcu_state.completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed);
+
+/*
+ * Return the number of RCU BH batches processed thus far for debug & stats.
+ */
+long rcu_batches_completed_bh(void)
+{
+ return rcu_bh_state.completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
+
+/*
+ * Does the CPU have callbacks ready to be invoked?
+ */
+static int
+cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
+{
+ return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL];
+}
+
+/*
+ * Does the current CPU require an as-yet-unscheduled grace period?
+ */
+static int
+cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+ /* ACCESS_ONCE() because we are accessing outside of lock. */
+ return *rdp->nxttail[RCU_DONE_TAIL] &&
+ ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum);
+}
+
+/*
+ * Return the root node of the specified rcu_state structure.
+ */
+static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
+{
+ return &rsp->node[0];
+}
+
+#ifdef CONFIG_SMP
+
+/*
+ * If the specified CPU is offline, tell the caller that it is in
+ * a quiescent state. Otherwise, whack it with a reschedule IPI.
+ * Grace periods can end up waiting on an offline CPU when that
+ * CPU is in the process of coming online -- it will be added to the
+ * rcu_node bitmasks before it actually makes it online. The same thing
+ * can happen while a CPU is in the process of going offline. Because this
+ * race is quite rare, we check for it after detecting that the grace
+ * period has been delayed rather than checking each and every CPU
+ * each and every time we start a new grace period.
+ */
+static int rcu_implicit_offline_qs(struct rcu_data *rdp)
+{
+ /*
+ * If the CPU is offline, it is in a quiescent state. We can
+ * trust its state not to change because interrupts are disabled.
+ */
+ if (cpu_is_offline(rdp->cpu)) {
+ rdp->offline_fqs++;
+ return 1;
+ }
+
+ /* The CPU is online, so send it a reschedule IPI. */
+ if (rdp->cpu != smp_processor_id())
+ smp_send_reschedule(rdp->cpu);
+ else
+ set_need_resched();
+ rdp->resched_ipi++;
+ return 0;
+}
+
+#endif /* #ifdef CONFIG_SMP */
+
+#ifdef CONFIG_NO_HZ
+static DEFINE_RATELIMIT_STATE(rcu_rs, 10 * HZ, 5);
+
+/**
+ * rcu_enter_nohz - inform RCU that current CPU is entering nohz
+ *
+ * Enter nohz mode, in other words, -leave- the mode in which RCU
+ * read-side critical sections can occur. (Though RCU read-side
+ * critical sections can occur in irq handlers in nohz mode, a possibility
+ * handled by rcu_irq_enter() and rcu_irq_exit()).
+ */
+void rcu_enter_nohz(void)
+{
+ unsigned long flags;
+ struct rcu_dynticks *rdtp;
+
+ smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
+ local_irq_save(flags);
+ rdtp = &__get_cpu_var(rcu_dynticks);
+ rdtp->dynticks++;
+ rdtp->dynticks_nesting--;
+ WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
+ local_irq_restore(flags);
+}
+
+/*
+ * rcu_exit_nohz - inform RCU that current CPU is leaving nohz
+ *
+ * Exit nohz mode, in other words, -enter- the mode in which RCU
+ * read-side critical sections normally occur.
+ */
+void rcu_exit_nohz(void)
+{
+ unsigned long flags;
+ struct rcu_dynticks *rdtp;
+
+ local_irq_save(flags);
+ rdtp = &__get_cpu_var(rcu_dynticks);
+ rdtp->dynticks++;
+ rdtp->dynticks_nesting++;
+ WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
+ local_irq_restore(flags);
+ smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+}
+
+/**
+ * rcu_nmi_enter - inform RCU of entry to NMI context
+ *
+ * If the CPU was idle with dynamic ticks active, and there is no
+ * irq handler running, this updates rdtp->dynticks_nmi to let the
+ * RCU grace-period handling know that the CPU is active.
+ */
+void rcu_nmi_enter(void)
+{
+ struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+
+ if (rdtp->dynticks & 0x1)
+ return;
+ rdtp->dynticks_nmi++;
+ WARN_ON_RATELIMIT(!(rdtp->dynticks_nmi & 0x1), &rcu_rs);
+ smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+}
+
+/**
+ * rcu_nmi_exit - inform RCU of exit from NMI context
+ *
+ * If the CPU was idle with dynamic ticks active, and there is no
+ * irq handler running, this updates rdtp->dynticks_nmi to let the
+ * RCU grace-period handling know that the CPU is no longer active.
+ */
+void rcu_nmi_exit(void)
+{
+ struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+
+ if (rdtp->dynticks & 0x1)
+ return;
+ smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
+ rdtp->dynticks_nmi++;
+ WARN_ON_RATELIMIT(rdtp->dynticks_nmi & 0x1, &rcu_rs);
+}
+
+/**
+ * rcu_irq_enter - inform RCU of entry to hard irq context
+ *
+ * If the CPU was idle with dynamic ticks active, this updates the
+ * rdtp->dynticks to let the RCU handling know that the CPU is active.
+ */
+void rcu_irq_enter(void)
+{
+ struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+
+ if (rdtp->dynticks_nesting++)
+ return;
+ rdtp->dynticks++;
+ WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
+ smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+}
+
+/**
+ * rcu_irq_exit - inform RCU of exit from hard irq context
+ *
+ * If the CPU was idle with dynamic ticks active, update rdtp->dynticks
+ * to let the RCU handling know that the CPU is going back to idle
+ * with no ticks.
+ */
+void rcu_irq_exit(void)
+{
+ struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+
+ if (--rdtp->dynticks_nesting)
+ return;
+ smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
+ rdtp->dynticks++;
+ WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
+
+ /* If the interrupt queued a callback, get out of dyntick mode. */
+ if (__get_cpu_var(rcu_data).nxtlist ||
+ __get_cpu_var(rcu_bh_data).nxtlist)
+ set_need_resched();
+}
+
+/*
+ * Record the specified "completed" value, which is later used to validate
+ * dynticks counter manipulations. Specify "rsp->completed - 1" to
+ * unconditionally invalidate any future dynticks manipulations (which is
+ * useful at the beginning of a grace period).
+ */
+static void dyntick_record_completed(struct rcu_state *rsp, long comp)
+{
+ rsp->dynticks_completed = comp;
+}
+
+#ifdef CONFIG_SMP
+
+/*
+ * Recall the previously recorded value of the completion for dynticks.
+ */
+static long dyntick_recall_completed(struct rcu_state *rsp)
+{
+ return rsp->dynticks_completed;
+}
+
+/*
+ * Snapshot the specified CPU's dynticks counter so that we can later
+ * credit them with an implicit quiescent state. Return 1 if this CPU
+ * is already in a quiescent state courtesy of dynticks idle mode.
+ */
+static int dyntick_save_progress_counter(struct rcu_data *rdp)
+{
+ int ret;
+ int snap;
+ int snap_nmi;
+
+ snap = rdp->dynticks->dynticks;
+ snap_nmi = rdp->dynticks->dynticks_nmi;
+ smp_mb(); /* Order sampling of snap with end of grace period. */
+ rdp->dynticks_snap = snap;
+ rdp->dynticks_nmi_snap = snap_nmi;
+ ret = ((snap & 0x1) == 0) && ((snap_nmi & 0x1) == 0);
+ if (ret)
+ rdp->dynticks_fqs++;
+ return ret;
+}
+
+/*
+ * Return true if the specified CPU has passed through a quiescent
+ * state by virtue of being in or having passed through a dynticks
+ * idle state since the last call to dyntick_save_progress_counter()
+ * for this same CPU.
+ */
+static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
+{
+ long curr;
+ long curr_nmi;
+ long snap;
+ long snap_nmi;
+
+ curr = rdp->dynticks->dynticks;
+ snap = rdp->dynticks_snap;
+ curr_nmi = rdp->dynticks->dynticks_nmi;
+ snap_nmi = rdp->dynticks_nmi_snap;
+ smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
+
+ /*
+ * If the CPU passed through or entered a dynticks idle phase with
+ * no active irq/NMI handlers, then we can safely pretend that the CPU
+ * already acknowledged the request to pass through a quiescent
+ * state. Either way, that CPU cannot possibly be in an RCU
+ * read-side critical section that started before the beginning
+ * of the current RCU grace period.
+ */
+ if ((curr != snap || (curr & 0x1) == 0) &&
+ (curr_nmi != snap_nmi || (curr_nmi & 0x1) == 0)) {
+ rdp->dynticks_fqs++;
+ return 1;
+ }
+
+ /* Go check for the CPU being offline. */
+ return rcu_implicit_offline_qs(rdp);
+}
+
+#endif /* #ifdef CONFIG_SMP */
+
+#else /* #ifdef CONFIG_NO_HZ */
+
+static void dyntick_record_completed(struct rcu_state *rsp, long comp)
+{
+}
+
+#ifdef CONFIG_SMP
+
+/*
+ * If there are no dynticks, then the only way that a CPU can passively
+ * be in a quiescent state is to be offline. Unlike dynticks idle, which
+ * is a point in time during the prior (already finished) grace period,
+ * an offline CPU is always in a quiescent state, and thus can be
+ * unconditionally applied. So just return the current value of completed.
+ */
+static long dyntick_recall_completed(struct rcu_state *rsp)
+{
+ return rsp->completed;
+}
+
+static int dyntick_save_progress_counter(struct rcu_data *rdp)
+{
+ return 0;
+}
+
+static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
+{
+ return rcu_implicit_offline_qs(rdp);
+}
+
+#endif /* #ifdef CONFIG_SMP */
+
+#endif /* #else #ifdef CONFIG_NO_HZ */
+
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+
+static void record_gp_stall_check_time(struct rcu_state *rsp)
+{
+ rsp->gp_start = jiffies;
+ rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
+}
+
+static void print_other_cpu_stall(struct rcu_state *rsp)
+{
+ int cpu;
+ long delta;
+ unsigned long flags;
+ struct rcu_node *rnp = rcu_get_root(rsp);
+ struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
+ struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
+
+ /* Only let one CPU complain about others per time interval. */
+
+ spin_lock_irqsave(&rnp->lock, flags);
+ delta = jiffies - rsp->jiffies_stall;
+ if (delta < RCU_STALL_RAT_DELAY || rsp->gpnum == rsp->completed) {
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ return;
+ }
+ rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
+ spin_unlock_irqrestore(&rnp->lock, flags);
+
+ /* OK, time to rat on our buddy... */
+
+ printk(KERN_ERR "INFO: RCU detected CPU stalls:");
+ for (; rnp_cur < rnp_end; rnp_cur++) {
+ if (rnp_cur->qsmask == 0)
+ continue;
+ for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++)
+ if (rnp_cur->qsmask & (1UL << cpu))
+ printk(" %d", rnp_cur->grplo + cpu);
+ }
+ printk(" (detected by %d, t=%ld jiffies)\n",
+ smp_processor_id(), (long)(jiffies - rsp->gp_start));
+ force_quiescent_state(rsp, 0); /* Kick them all. */
+}
+
+static void print_cpu_stall(struct rcu_state *rsp)
+{
+ unsigned long flags;
+ struct rcu_node *rnp = rcu_get_root(rsp);
+
+ printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n",
+ smp_processor_id(), jiffies - rsp->gp_start);
+ dump_stack();
+ spin_lock_irqsave(&rnp->lock, flags);
+ if ((long)(jiffies - rsp->jiffies_stall) >= 0)
+ rsp->jiffies_stall =
+ jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ set_need_resched(); /* kick ourselves to get things going. */
+}
+
+static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+ long delta;
+ struct rcu_node *rnp;
+
+ delta = jiffies - rsp->jiffies_stall;
+ rnp = rdp->mynode;
+ if ((rnp->qsmask & rdp->grpmask) && delta >= 0) {
+
+ /* We haven't checked in, so go dump stack. */
+ print_cpu_stall(rsp);
+
+ } else if (rsp->gpnum != rsp->completed &&
+ delta >= RCU_STALL_RAT_DELAY) {
+
+ /* They had two time units to dump stack, so complain. */
+ print_other_cpu_stall(rsp);
+ }
+}
+
+#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
+static void record_gp_stall_check_time(struct rcu_state *rsp)
+{
+}
+
+static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+}
+
+#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
+/*
+ * Update CPU-local rcu_data state to record the newly noticed grace period.
+ * This is used both when we started the grace period and when we notice
+ * that someone else started the grace period.
+ */
+static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+ rdp->qs_pending = 1;
+ rdp->passed_quiesc = 0;
+ rdp->gpnum = rsp->gpnum;
+ rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
+ RCU_JIFFIES_TILL_FORCE_QS;
+}
+
+/*
+ * Did someone else start a new RCU grace period since we last
+ * checked? Update local state appropriately if so. Must be called
+ * on the CPU corresponding to rdp.
+ */
+static int
+check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ local_irq_save(flags);
+ if (rdp->gpnum != rsp->gpnum) {
+ note_new_gpnum(rsp, rdp);
+ ret = 1;
+ }
+ local_irq_restore(flags);
+ return ret;
+}
+
+/*
+ * Start a new RCU grace period if warranted, re-initializing the hierarchy
+ * in preparation for detecting the next grace period. The caller must hold
+ * the root node's ->lock, which is released before return. Hard irqs must
+ * be disabled.
+ */
+static void
+rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
+ __releases(rcu_get_root(rsp)->lock)
+{
+ struct rcu_data *rdp = rsp->rda[smp_processor_id()];
+ struct rcu_node *rnp = rcu_get_root(rsp);
+ struct rcu_node *rnp_cur;
+ struct rcu_node *rnp_end;
+
+ if (!cpu_needs_another_gp(rsp, rdp)) {
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ return;
+ }
+
+ /* Advance to a new grace period and initialize state. */
+ rsp->gpnum++;
+ rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
+ rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
+ rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
+ RCU_JIFFIES_TILL_FORCE_QS;
+ record_gp_stall_check_time(rsp);
+ dyntick_record_completed(rsp, rsp->completed - 1);
+ note_new_gpnum(rsp, rdp);
+
+ /*
+ * Because we are first, we know that all our callbacks will
+ * be covered by this upcoming grace period, even the ones
+ * that were registered arbitrarily recently.
+ */
+ rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+ rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+
+ /* Special-case the common single-level case. */
+ if (NUM_RCU_NODES == 1) {
+ rnp->qsmask = rnp->qsmaskinit;
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ return;
+ }
+
+ spin_unlock(&rnp->lock); /* leave irqs disabled. */
+
+
+ /* Exclude any concurrent CPU-hotplug operations. */
+ spin_lock(&rsp->onofflock); /* irqs already disabled. */
+
+ /*
+ * Set the quiescent-state-needed bits in all the non-leaf RCU
+ * nodes for all currently online CPUs. This operation relies
+ * on the layout of the hierarchy within the rsp->node[] array.
+ * Note that other CPUs will access only the leaves of the
+ * hierarchy, which still indicate that no grace period is in
+ * progress. In addition, we have excluded CPU-hotplug operations.
+ *
+ * We therefore do not need to hold any locks. Any required
+ * memory barriers will be supplied by the locks guarding the
+ * leaf rcu_nodes in the hierarchy.
+ */
+
+ rnp_end = rsp->level[NUM_RCU_LVLS - 1];
+ for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++)
+ rnp_cur->qsmask = rnp_cur->qsmaskinit;
+
+ /*
+ * Now set up the leaf nodes. Here we must be careful. First,
+ * we need to hold the lock in order to exclude other CPUs, which
+ * might be contending for the leaf nodes' locks. Second, as
+ * soon as we initialize a given leaf node, its CPUs might run
+ * up the rest of the hierarchy. We must therefore acquire locks
+ * for each node that we touch during this stage. (But we still
+ * are excluding CPU-hotplug operations.)
+ *
+ * Note that the grace period cannot complete until we finish
+ * the initialization process, as there will be at least one
+ * qsmask bit set in the root node until that time, namely the
+ * one corresponding to this CPU.
+ */
+ rnp_end = &rsp->node[NUM_RCU_NODES];
+ rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
+ for (; rnp_cur < rnp_end; rnp_cur++) {
+ spin_lock(&rnp_cur->lock); /* irqs already disabled. */
+ rnp_cur->qsmask = rnp_cur->qsmaskinit;
+ spin_unlock(&rnp_cur->lock); /* irqs already disabled. */
+ }
+
+ rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
+ spin_unlock_irqrestore(&rsp->onofflock, flags);
+}
+
+/*
+ * Advance this CPU's callbacks, but only if the current grace period
+ * has ended. This may be called only from the CPU to whom the rdp
+ * belongs.
+ */
+static void
+rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+ long completed_snap;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ completed_snap = ACCESS_ONCE(rsp->completed); /* outside of lock. */
+
+ /* Did another grace period end? */
+ if (rdp->completed != completed_snap) {
+
+ /* Advance callbacks. No harm if list empty. */
+ rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
+ rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
+ rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+
+ /* Remember that we saw this grace-period completion. */
+ rdp->completed = completed_snap;
+ }
+ local_irq_restore(flags);
+}
+
+/*
+ * Similar to cpu_quiet(), for which it is a helper function. Allows
+ * a group of CPUs to be quieted at one go, though all the CPUs in the
+ * group must be represented by the same leaf rcu_node structure.
+ * That structure's lock must be held upon entry, and it is released
+ * before return.
+ */
+static void
+cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
+ unsigned long flags)
+ __releases(rnp->lock)
+{
+ /* Walk up the rcu_node hierarchy. */
+ for (;;) {
+ if (!(rnp->qsmask & mask)) {
+
+ /* Our bit has already been cleared, so done. */
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ return;
+ }
+ rnp->qsmask &= ~mask;
+ if (rnp->qsmask != 0) {
+
+ /* Other bits still set at this level, so done. */
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ return;
+ }
+ mask = rnp->grpmask;
+ if (rnp->parent == NULL) {
+
+ /* No more levels. Exit loop holding root lock. */
+
+ break;
+ }
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ rnp = rnp->parent;
+ spin_lock_irqsave(&rnp->lock, flags);
+ }
+
+ /*
+ * Get here if we are the last CPU to pass through a quiescent
+ * state for this grace period. Clean up and let rcu_start_gp()
+ * start up the next grace period if one is needed. Note that
+ * we still hold rnp->lock, as required by rcu_start_gp(), which
+ * will release it.
+ */
+ rsp->completed = rsp->gpnum;
+ rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
+ rcu_start_gp(rsp, flags); /* releases rnp->lock. */
+}
+
+/*
+ * Record a quiescent state for the specified CPU, which must either be
+ * the current CPU or an offline CPU. The lastcomp argument is used to
+ * make sure we are still in the grace period of interest. We don't want
+ * to end the current grace period based on quiescent states detected in
+ * an earlier grace period!
+ */
+static void
+cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
+{
+ unsigned long flags;
+ unsigned long mask;
+ struct rcu_node *rnp;
+
+ rnp = rdp->mynode;
+ spin_lock_irqsave(&rnp->lock, flags);
+ if (lastcomp != ACCESS_ONCE(rsp->completed)) {
+
+ /*
+ * Someone beat us to it for this grace period, so leave.
+ * The race with GP start is resolved by the fact that we
+ * hold the leaf rcu_node lock, so that the per-CPU bits
+ * cannot yet be initialized -- so we would simply find our
+ * CPU's bit already cleared in cpu_quiet_msk() if this race
+ * occurred.
+ */
+ rdp->passed_quiesc = 0; /* try again later! */
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ return;
+ }
+ mask = rdp->grpmask;
+ if ((rnp->qsmask & mask) == 0) {
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ } else {
+ rdp->qs_pending = 0;
+
+ /*
+ * This GP can't end until cpu checks in, so all of our
+ * callbacks can be processed during the next GP.
+ */
+ rdp = rsp->rda[smp_processor_id()];
+ rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+
+ cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */
+ }
+}
+
+/*
+ * Check to see if there is a new grace period of which this CPU
+ * is not yet aware, and if so, set up local rcu_data state for it.
+ * Otherwise, see if this CPU has just passed through its first
+ * quiescent state for this grace period, and record that fact if so.
+ */
+static void
+rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+ /* If there is now a new grace period, record and return. */
+ if (check_for_new_grace_period(rsp, rdp))
+ return;
+
+ /*
+ * Does this CPU still need to do its part for current grace period?
+ * If no, return and let the other CPUs do their part as well.
+ */
+ if (!rdp->qs_pending)
+ return;
+
+ /*
+ * Was there a quiescent state since the beginning of the grace
+ * period? If no, then exit and wait for the next call.
+ */
+ if (!rdp->passed_quiesc)
+ return;
+
+ /* Tell RCU we are done (but cpu_quiet() will be the judge of that). */
+ cpu_quiet(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
+ * and move all callbacks from the outgoing CPU to the current one.
+ */
+static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
+{
+ int i;
+ unsigned long flags;
+ long lastcomp;
+ unsigned long mask;
+ struct rcu_data *rdp = rsp->rda[cpu];
+ struct rcu_data *rdp_me;
+ struct rcu_node *rnp;
+
+ /* Exclude any attempts to start a new grace period. */
+ spin_lock_irqsave(&rsp->onofflock, flags);
+
+ /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
+ rnp = rdp->mynode;
+ mask = rdp->grpmask; /* rnp->grplo is constant. */
+ do {
+ spin_lock(&rnp->lock); /* irqs already disabled. */
+ rnp->qsmaskinit &= ~mask;
+ if (rnp->qsmaskinit != 0) {
+ spin_unlock(&rnp->lock); /* irqs already disabled. */
+ break;
+ }
+ mask = rnp->grpmask;
+ spin_unlock(&rnp->lock); /* irqs already disabled. */
+ rnp = rnp->parent;
+ } while (rnp != NULL);
+ lastcomp = rsp->completed;
+
+ spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
+
+ /* Being offline is a quiescent state, so go record it. */
+ cpu_quiet(cpu, rsp, rdp, lastcomp);
+
+ /*
+ * Move callbacks from the outgoing CPU to the running CPU.
+ * Note that the outgoing CPU is now quiescent, so it is now
+ * (uncharacteristically) safe to access its rcu_data structure.
+ * Note also that we must carefully retain the order of the
+ * outgoing CPU's callbacks in order for rcu_barrier() to work
+ * correctly. Finally, note that we start all the callbacks
+ * afresh, even those that have passed through a grace period
+ * and are therefore ready to invoke. The theory is that hotplug
+ * events are rare, and that if they are frequent enough to
+ * indefinitely delay callbacks, you have far worse things to
+ * be worrying about.
+ */
+ rdp_me = rsp->rda[smp_processor_id()];
+ if (rdp->nxtlist != NULL) {
+ *rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist;
+ rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+ rdp->nxtlist = NULL;
+ for (i = 0; i < RCU_NEXT_SIZE; i++)
+ rdp->nxttail[i] = &rdp->nxtlist;
+ rdp_me->qlen += rdp->qlen;
+ rdp->qlen = 0;
+ }
+ local_irq_restore(flags);
+}
+
+/*
+ * Remove the specified CPU from the RCU hierarchy and move any pending
+ * callbacks that it might have to the current CPU. This code assumes
+ * that at least one CPU in the system will remain running at all times.
+ * Any attempt to offline -all- CPUs is likely to strand RCU callbacks.
+ */
+static void rcu_offline_cpu(int cpu)
+{
+ __rcu_offline_cpu(cpu, &rcu_state);
+ __rcu_offline_cpu(cpu, &rcu_bh_state);
+}
+
+#else /* #ifdef CONFIG_HOTPLUG_CPU */
+
+static void rcu_offline_cpu(int cpu)
+{
+}
+
+#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
+
+/*
+ * Invoke any RCU callbacks that have made it to the end of their grace
+ * period. Throttle as specified by rdp->blimit.
+ */
+static void rcu_do_batch(struct rcu_data *rdp)
+{
+ unsigned long flags;
+ struct rcu_head *next, *list, **tail;
+ int count;
+
+	/* If no callbacks are ready, just return. */
+ if (!cpu_has_callbacks_ready_to_invoke(rdp))
+ return;
+
+ /*
+ * Extract the list of ready callbacks, disabling to prevent
+ * races with call_rcu() from interrupt handlers.
+ */
+ local_irq_save(flags);
+ list = rdp->nxtlist;
+ rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
+ *rdp->nxttail[RCU_DONE_TAIL] = NULL;
+ tail = rdp->nxttail[RCU_DONE_TAIL];
+ for (count = RCU_NEXT_SIZE - 1; count >= 0; count--)
+ if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
+ rdp->nxttail[count] = &rdp->nxtlist;
+ local_irq_restore(flags);
+
+ /* Invoke callbacks. */
+ count = 0;
+ while (list) {
+ next = list->next;
+ prefetch(next);
+ list->func(list);
+ list = next;
+ if (++count >= rdp->blimit)
+ break;
+ }
+
+ local_irq_save(flags);
+
+ /* Update count, and requeue any remaining callbacks. */
+ rdp->qlen -= count;
+ if (list != NULL) {
+ *tail = rdp->nxtlist;
+ rdp->nxtlist = list;
+ for (count = 0; count < RCU_NEXT_SIZE; count++)
+ if (&rdp->nxtlist == rdp->nxttail[count])
+ rdp->nxttail[count] = tail;
+ else
+ break;
+ }
+
+ /* Reinstate batch limit if we have worked down the excess. */
+ if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
+ rdp->blimit = blimit;
+
+ local_irq_restore(flags);
+
+ /* Re-raise the RCU softirq if there are callbacks remaining. */
+ if (cpu_has_callbacks_ready_to_invoke(rdp))
+ raise_softirq(RCU_SOFTIRQ);
+}
+
+/*
+ * Check to see if this CPU is in a non-context-switch quiescent state
+ * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
+ * Also schedule the RCU softirq handler.
+ *
+ * This function must be called with hardirqs disabled. It is normally
+ * invoked from the scheduling-clock interrupt. If rcu_pending returns
+ * false, there is no point in invoking rcu_check_callbacks().
+ */
+void rcu_check_callbacks(int cpu, int user)
+{
+ if (user ||
+ (idle_cpu(cpu) && !in_softirq() &&
+ hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+
+ /*
+ * Get here if this CPU took its interrupt from user
+ * mode or from the idle loop, and if this is not a
+ * nested interrupt. In this case, the CPU is in
+ * a quiescent state, so count it.
+ *
+ * No memory barrier is required here because both
+ * rcu_qsctr_inc() and rcu_bh_qsctr_inc() reference
+ * only CPU-local variables that other CPUs neither
+ * access nor modify, at least not while the corresponding
+ * CPU is online.
+ */
+
+ rcu_qsctr_inc(cpu);
+ rcu_bh_qsctr_inc(cpu);
+
+ } else if (!in_softirq()) {
+
+ /*
+ * Get here if this CPU did not take its interrupt from
+ * softirq, in other words, if it is not interrupting
+ * a rcu_bh read-side critical section. This is an _bh
+ * critical section, so count it.
+ */
+
+ rcu_bh_qsctr_inc(cpu);
+ }
+ raise_softirq(RCU_SOFTIRQ);
+}
+
+#ifdef CONFIG_SMP
+
+/*
+ * Scan the leaf rcu_node structures, processing dyntick state for any that
+ * have not yet encountered a quiescent state, using the function specified.
+ * Returns 1 if the current grace period ends while scanning (possibly
+ * because we made it end).
+ */
+static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
+ int (*f)(struct rcu_data *))
+{
+ unsigned long bit;
+ int cpu;
+ unsigned long flags;
+ unsigned long mask;
+ struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
+ struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
+
+ for (; rnp_cur < rnp_end; rnp_cur++) {
+ mask = 0;
+ spin_lock_irqsave(&rnp_cur->lock, flags);
+ if (rsp->completed != lastcomp) {
+ spin_unlock_irqrestore(&rnp_cur->lock, flags);
+ return 1;
+ }
+ if (rnp_cur->qsmask == 0) {
+ spin_unlock_irqrestore(&rnp_cur->lock, flags);
+ continue;
+ }
+ cpu = rnp_cur->grplo;
+ bit = 1;
+ for (; cpu <= rnp_cur->grphi; cpu++, bit <<= 1) {
+ if ((rnp_cur->qsmask & bit) != 0 && f(rsp->rda[cpu]))
+ mask |= bit;
+ }
+ if (mask != 0 && rsp->completed == lastcomp) {
+
+ /* cpu_quiet_msk() releases rnp_cur->lock. */
+ cpu_quiet_msk(mask, rsp, rnp_cur, flags);
+ continue;
+ }
+ spin_unlock_irqrestore(&rnp_cur->lock, flags);
+ }
+ return 0;
+}
+
+/*
+ * Force quiescent states on reluctant CPUs, and also detect which
+ * CPUs are in dyntick-idle mode.
+ */
+static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
+{
+ unsigned long flags;
+ long lastcomp;
+ struct rcu_data *rdp = rsp->rda[smp_processor_id()];
+ struct rcu_node *rnp = rcu_get_root(rsp);
+ u8 signaled;
+
+ if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum))
+ return; /* No grace period in progress, nothing to force. */
+ if (!spin_trylock_irqsave(&rsp->fqslock, flags)) {
+ rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */
+ return; /* Someone else is already on the job. */
+ }
+ if (relaxed &&
+ (long)(rsp->jiffies_force_qs - jiffies) >= 0 &&
+ (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) >= 0)
+ goto unlock_ret; /* no emergency and done recently. */
+ rsp->n_force_qs++;
+ spin_lock(&rnp->lock);
+ lastcomp = rsp->completed;
+ signaled = rsp->signaled;
+ rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
+ rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
+ RCU_JIFFIES_TILL_FORCE_QS;
+ if (lastcomp == rsp->gpnum) {
+ rsp->n_force_qs_ngp++;
+ spin_unlock(&rnp->lock);
+ goto unlock_ret; /* no GP in progress, time updated. */
+ }
+ spin_unlock(&rnp->lock);
+ switch (signaled) {
+ case RCU_GP_INIT:
+
+ break; /* grace period still initializing, ignore. */
+
+ case RCU_SAVE_DYNTICK:
+
+ if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
+ break; /* So gcc recognizes the dead code. */
+
+ /* Record dyntick-idle state. */
+ if (rcu_process_dyntick(rsp, lastcomp,
+ dyntick_save_progress_counter))
+ goto unlock_ret;
+
+ /* Update state, record completion counter. */
+ spin_lock(&rnp->lock);
+ if (lastcomp == rsp->completed) {
+ rsp->signaled = RCU_FORCE_QS;
+ dyntick_record_completed(rsp, lastcomp);
+ }
+ spin_unlock(&rnp->lock);
+ break;
+
+ case RCU_FORCE_QS:
+
+		/* Check dyntick-idle state, send IPI to laggards. */
+ if (rcu_process_dyntick(rsp, dyntick_recall_completed(rsp),
+ rcu_implicit_dynticks_qs))
+ goto unlock_ret;
+
+ /* Leave state in case more forcing is required. */
+
+ break;
+ }
+unlock_ret:
+ spin_unlock_irqrestore(&rsp->fqslock, flags);
+}
+
+#else /* #ifdef CONFIG_SMP */
+
+static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
+{
+ set_need_resched();
+}
+
+#endif /* #else #ifdef CONFIG_SMP */
+
+/*
+ * This does the RCU processing work from softirq context for the
+ * specified rcu_state and rcu_data structures. This may be called
+ * only from the CPU to whom the rdp belongs.
+ */
+static void
+__rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+ unsigned long flags;
+
+ /*
+ * If an RCU GP has gone long enough, go check for dyntick
+ * idle CPUs and, if needed, send resched IPIs.
+ */
+ if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
+ (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)
+ force_quiescent_state(rsp, 1);
+
+ /*
+ * Advance callbacks in response to end of earlier grace
+ * period that some other CPU ended.
+ */
+ rcu_process_gp_end(rsp, rdp);
+
+ /* Update RCU state based on any recent quiescent states. */
+ rcu_check_quiescent_state(rsp, rdp);
+
+ /* Does this CPU require a not-yet-started grace period? */
+ if (cpu_needs_another_gp(rsp, rdp)) {
+ spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags);
+ rcu_start_gp(rsp, flags); /* releases above lock */
+ }
+
+ /* If there are callbacks ready, invoke them. */
+ rcu_do_batch(rdp);
+}
+
+/*
+ * Do softirq processing for the current CPU.
+ */
+static void rcu_process_callbacks(struct softirq_action *unused)
+{
+ /*
+ * Memory references from any prior RCU read-side critical sections
+ * executed by the interrupted code must be seen before any RCU
+ * grace-period manipulations below.
+ */
+ smp_mb(); /* See above block comment. */
+
+ __rcu_process_callbacks(&rcu_state, &__get_cpu_var(rcu_data));
+ __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
+
+ /*
+ * Memory references from any later RCU read-side critical sections
+ * executed by the interrupted code must be seen after any RCU
+ * grace-period manipulations above.
+ */
+ smp_mb(); /* See above block comment. */
+}
+
+static void
+__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
+ struct rcu_state *rsp)
+{
+ unsigned long flags;
+ struct rcu_data *rdp;
+
+ head->func = func;
+ head->next = NULL;
+
+ smp_mb(); /* Ensure RCU update seen before callback registry. */
+
+ /*
+ * Opportunistically note grace-period endings and beginnings.
+ * Note that we might see a beginning right after we see an
+ * end, but never vice versa, since this CPU has to pass through
+ * a quiescent state betweentimes.
+ */
+ local_irq_save(flags);
+ rdp = rsp->rda[smp_processor_id()];
+ rcu_process_gp_end(rsp, rdp);
+ check_for_new_grace_period(rsp, rdp);
+
+ /* Add the callback to our list. */
+ *rdp->nxttail[RCU_NEXT_TAIL] = head;
+ rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
+
+ /* Start a new grace period if one not already started. */
+ if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) {
+ unsigned long nestflag;
+ struct rcu_node *rnp_root = rcu_get_root(rsp);
+
+ spin_lock_irqsave(&rnp_root->lock, nestflag);
+ rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */
+ }
+
+ /* Force the grace period if too many callbacks or too long waiting. */
+ if (unlikely(++rdp->qlen > qhimark)) {
+ rdp->blimit = LONG_MAX;
+ force_quiescent_state(rsp, 0);
+ } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
+ (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)
+ force_quiescent_state(rsp, 1);
+ local_irq_restore(flags);
+}
+
+/*
+ * Queue an RCU callback for invocation after a grace period.
+ */
+void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+ __call_rcu(head, func, &rcu_state);
+}
+EXPORT_SYMBOL_GPL(call_rcu);
+
+/*
+ * Queue an RCU callback for invocation after a quicker grace period.
+ */
+void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+ __call_rcu(head, func, &rcu_bh_state);
+}
+EXPORT_SYMBOL_GPL(call_rcu_bh);
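For orientation only (this sketch is not part of the patch): a typical caller of call_rcu() embeds a struct rcu_head in its own object and frees the object from the callback once a grace period has elapsed. "struct foo", foo_reclaim() and foo_free() are hypothetical names used purely for illustration.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rcu;
};

/* Invoked after a grace period; safe to free the enclosing object now. */
static void foo_reclaim(struct rcu_head *rcu)
{
	struct foo *fp = container_of(rcu, struct foo, rcu);

	kfree(fp);
}

static void foo_free(struct foo *fp)
{
	/* Readers may still hold references, so defer the actual free. */
	call_rcu(&fp->rcu, foo_reclaim);
}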
+
+/*
+ * Check to see if there is any immediate RCU-related work to be done
+ * by the current CPU, for the specified type of RCU, returning 1 if so.
+ * The checks are in order of increasing expense: checks that can be
+ * carried out against CPU-local state are performed first. However,
+ * we must check for CPU stalls first, else we might not get a chance.
+ */
+static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+ rdp->n_rcu_pending++;
+
+ /* Check for CPU stalls, if enabled. */
+ check_cpu_stall(rsp, rdp);
+
+ /* Is the RCU core waiting for a quiescent state from this CPU? */
+ if (rdp->qs_pending)
+ return 1;
+
+ /* Does this CPU have callbacks ready to invoke? */
+ if (cpu_has_callbacks_ready_to_invoke(rdp))
+ return 1;
+
+ /* Has RCU gone idle with this CPU needing another grace period? */
+ if (cpu_needs_another_gp(rsp, rdp))
+ return 1;
+
+ /* Has another RCU grace period completed? */
+ if (ACCESS_ONCE(rsp->completed) != rdp->completed) /* outside of lock */
+ return 1;
+
+ /* Has a new RCU grace period started? */
+ if (ACCESS_ONCE(rsp->gpnum) != rdp->gpnum) /* outside of lock */
+ return 1;
+
+ /* Has an RCU GP gone long enough to send resched IPIs &c? */
+ if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) &&
+ ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
+ (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0))
+ return 1;
+
+ /* nothing to do */
+ return 0;
+}
+
+/*
+ * Check to see if there is any immediate RCU-related work to be done
+ * by the current CPU, returning 1 if so. This function is part of the
+ * RCU implementation; it is -not- an exported member of the RCU API.
+ */
+int rcu_pending(int cpu)
+{
+ return __rcu_pending(&rcu_state, &per_cpu(rcu_data, cpu)) ||
+ __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu));
+}
+
+/*
+ * Check to see if any future RCU-related work will need to be done
+ * by the current CPU, even if none need be done immediately, returning
+ * 1 if so. This function is part of the RCU implementation; it is -not-
+ * an exported member of the RCU API.
+ */
+int rcu_needs_cpu(int cpu)
+{
+ /* RCU callbacks either ready or pending? */
+ return per_cpu(rcu_data, cpu).nxtlist ||
+ per_cpu(rcu_bh_data, cpu).nxtlist;
+}
+
+/*
+ * Initialize a CPU's per-CPU RCU data. We take this "scorched earth"
+ * approach so that we don't have to worry about how long the CPU has
+ * been gone, or whether it ever was online previously. We do trust the
+ * ->mynode field, as it is constant for a given struct rcu_data and
+ * initialized during early boot.
+ *
+ * Note that only one online or offline event can be happening at a given
+ * time. Note also that we can accept some slop in the rsp->completed
+ * access due to the fact that this CPU cannot possibly have any RCU
+ * callbacks in flight yet.
+ */
+static void
+rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
+{
+ unsigned long flags;
+ int i;
+ long lastcomp;
+ unsigned long mask;
+ struct rcu_data *rdp = rsp->rda[cpu];
+ struct rcu_node *rnp = rcu_get_root(rsp);
+
+ /* Set up local state, ensuring consistent view of global state. */
+ spin_lock_irqsave(&rnp->lock, flags);
+ lastcomp = rsp->completed;
+ rdp->completed = lastcomp;
+ rdp->gpnum = lastcomp;
+ rdp->passed_quiesc = 0; /* We could be racing with new GP, */
+ rdp->qs_pending = 1; /* so set up to respond to current GP. */
+ rdp->beenonline = 1; /* We have now been online. */
+ rdp->passed_quiesc_completed = lastcomp - 1;
+ rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
+ rdp->nxtlist = NULL;
+ for (i = 0; i < RCU_NEXT_SIZE; i++)
+ rdp->nxttail[i] = &rdp->nxtlist;
+ rdp->qlen = 0;
+ rdp->blimit = blimit;
+#ifdef CONFIG_NO_HZ
+ rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
+#endif /* #ifdef CONFIG_NO_HZ */
+ rdp->cpu = cpu;
+ spin_unlock(&rnp->lock); /* irqs remain disabled. */
+
+ /*
+ * A new grace period might start here. If so, we won't be part
+ * of it, but that is OK, as we are currently in a quiescent state.
+ */
+
+ /* Exclude any attempts to start a new GP on large systems. */
+ spin_lock(&rsp->onofflock); /* irqs already disabled. */
+
+ /* Add CPU to rcu_node bitmasks. */
+ rnp = rdp->mynode;
+ mask = rdp->grpmask;
+ do {
+ /* Exclude any attempts to start a new GP on small systems. */
+ spin_lock(&rnp->lock); /* irqs already disabled. */
+ rnp->qsmaskinit |= mask;
+ mask = rnp->grpmask;
+ spin_unlock(&rnp->lock); /* irqs already disabled. */
+ rnp = rnp->parent;
+ } while (rnp != NULL && !(rnp->qsmaskinit & mask));
+
+ spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
+
+ /*
+ * A new grace period might start here. If so, we will be part of
+ * it, and its gpnum will be greater than ours, so we will
+ * participate. It is also possible for the gpnum to have been
+ * incremented before this function was called, and the bitmasks
+ * to not be filled out until now, in which case we will also
+ * participate due to our gpnum being behind.
+ */
+
+ /* Since it is coming online, the CPU is in a quiescent state. */
+ cpu_quiet(cpu, rsp, rdp, lastcomp);
+ local_irq_restore(flags);
+}
+
+static void __cpuinit rcu_online_cpu(int cpu)
+{
+#ifdef CONFIG_NO_HZ
+ struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+ rdtp->dynticks_nesting = 1;
+ rdtp->dynticks |= 1; /* need consecutive #s even for hotplug. */
+ rdtp->dynticks_nmi = (rdtp->dynticks_nmi + 1) & ~0x1;
+#endif /* #ifdef CONFIG_NO_HZ */
+ rcu_init_percpu_data(cpu, &rcu_state);
+ rcu_init_percpu_data(cpu, &rcu_bh_state);
+ open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+}
+
+/*
+ * Handle CPU online/offline notification events.
+ */
+static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ long cpu = (long)hcpu;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ rcu_online_cpu(cpu);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
+ rcu_offline_cpu(cpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+/*
+ * Compute the per-level fanout, either using the exact fanout specified
+ * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
+ */
+#ifdef CONFIG_RCU_FANOUT_EXACT
+static void __init rcu_init_levelspread(struct rcu_state *rsp)
+{
+ int i;
+
+ for (i = NUM_RCU_LVLS - 1; i >= 0; i--)
+ rsp->levelspread[i] = CONFIG_RCU_FANOUT;
+}
+#else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
+static void __init rcu_init_levelspread(struct rcu_state *rsp)
+{
+ int ccur;
+ int cprv;
+ int i;
+
+ cprv = NR_CPUS;
+ for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
+ ccur = rsp->levelcnt[i];
+ rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
+ cprv = ccur;
+ }
+}
+#endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
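A small worked example may help with the balanced case above. The sketch below is a standalone user-space program (not kernel code); the configuration is hypothetical: 96 possible CPUs, fanout 16, two levels, so levelcnt = { 1, 6 } (one root node, six leaves). The ceiling division then yields a spread of 16 CPUs per leaf and 6 leaves under the root.

#include <stdio.h>

#define NUM_RCU_LVLS	2
#define NR_CPUS		96

int main(void)
{
	int levelcnt[NUM_RCU_LVLS] = { 1, 6 };	/* root level, then leaf level */
	int levelspread[NUM_RCU_LVLS];
	int ccur, cprv, i;

	/* Same bottom-up ceiling-divide loop as rcu_init_levelspread(). */
	cprv = NR_CPUS;
	for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
		ccur = levelcnt[i];
		levelspread[i] = (cprv + ccur - 1) / ccur;
		cprv = ccur;
	}

	for (i = 0; i < NUM_RCU_LVLS; i++)
		printf("level %d: spread %d\n", i, levelspread[i]);
	/* Prints: "level 0: spread 6" and "level 1: spread 16". */
	return 0;
}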
+
+/*
+ * Helper function for rcu_init() that initializes one rcu_state structure.
+ */
+static void __init rcu_init_one(struct rcu_state *rsp)
+{
+ int cpustride = 1;
+ int i;
+ int j;
+ struct rcu_node *rnp;
+
+ /* Initialize the level-tracking arrays. */
+
+ for (i = 1; i < NUM_RCU_LVLS; i++)
+ rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
+ rcu_init_levelspread(rsp);
+
+ /* Initialize the elements themselves, starting from the leaves. */
+
+ for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
+ cpustride *= rsp->levelspread[i];
+ rnp = rsp->level[i];
+ for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
+ spin_lock_init(&rnp->lock);
+ rnp->qsmask = 0;
+ rnp->qsmaskinit = 0;
+ rnp->grplo = j * cpustride;
+ rnp->grphi = (j + 1) * cpustride - 1;
+ if (rnp->grphi >= NR_CPUS)
+ rnp->grphi = NR_CPUS - 1;
+ if (i == 0) {
+ rnp->grpnum = 0;
+ rnp->grpmask = 0;
+ rnp->parent = NULL;
+ } else {
+ rnp->grpnum = j % rsp->levelspread[i - 1];
+ rnp->grpmask = 1UL << rnp->grpnum;
+ rnp->parent = rsp->level[i - 1] +
+ j / rsp->levelspread[i - 1];
+ }
+ rnp->level = i;
+ }
+ }
+}
+
+/*
+ * Helper macro for __rcu_init(). To be used nowhere else!
+ * Assigns leaf node pointers into each CPU's rcu_data structure.
+ */
+#define RCU_DATA_PTR_INIT(rsp, rcu_data) \
+do { \
+ rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
+ j = 0; \
+ for_each_possible_cpu(i) { \
+ if (i > rnp[j].grphi) \
+ j++; \
+ per_cpu(rcu_data, i).mynode = &rnp[j]; \
+ (rsp)->rda[i] = &per_cpu(rcu_data, i); \
+ } \
+} while (0)
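Continuing the hypothetical two-level layout sketched after rcu_init_levelspread() above (six leaves, spread 16), this loop would point the rcu_data of CPUs 0-15 at leaf 0, CPUs 16-31 at leaf 1, and so on, advancing j whenever a CPU number passes the current leaf's grphi.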
+
+static struct notifier_block __cpuinitdata rcu_nb = {
+ .notifier_call = rcu_cpu_notify,
+};
+
+void __init __rcu_init(void)
+{
+ int i; /* All used by RCU_DATA_PTR_INIT(). */
+ int j;
+ struct rcu_node *rnp;
+
+ printk(KERN_WARNING "Experimental hierarchical RCU implementation.\n");
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+ printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+ rcu_init_one(&rcu_state);
+ RCU_DATA_PTR_INIT(&rcu_state, rcu_data);
+ rcu_init_one(&rcu_bh_state);
+ RCU_DATA_PTR_INIT(&rcu_bh_state, rcu_bh_data);
+
+ for_each_online_cpu(i)
+ rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)i);
+ /* Register notifier for non-boot CPUs */
+ register_cpu_notifier(&rcu_nb);
+ printk(KERN_WARNING "Experimental hierarchical RCU init done.\n");
+}
+
+module_param(blimit, int, 0);
+module_param(qhimark, int, 0);
+module_param(qlowmark, int, 0);
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
new file mode 100644
index 000000000000..d6db3e837826
--- /dev/null
+++ b/kernel/rcutree_trace.c
@@ -0,0 +1,271 @@
+/*
+ * Read-Copy Update tracing for hierarchical implementation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2008
+ *
+ * Papers: http://www.rdrop.com/users/paulmck/RCU
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * Documentation/RCU
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
+{
+ if (!rdp->beenonline)
+ return;
+ seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d rpfq=%ld rp=%x",
+ rdp->cpu,
+ cpu_is_offline(rdp->cpu) ? '!' : ' ',
+ rdp->completed, rdp->gpnum,
+ rdp->passed_quiesc, rdp->passed_quiesc_completed,
+ rdp->qs_pending,
+ rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending,
+ (int)(rdp->n_rcu_pending & 0xffff));
+#ifdef CONFIG_NO_HZ
+ seq_printf(m, " dt=%d/%d dn=%d df=%lu",
+ rdp->dynticks->dynticks,
+ rdp->dynticks->dynticks_nesting,
+ rdp->dynticks->dynticks_nmi,
+ rdp->dynticks_fqs);
+#endif /* #ifdef CONFIG_NO_HZ */
+ seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
+ seq_printf(m, " ql=%ld b=%ld\n", rdp->qlen, rdp->blimit);
+}
+
+#define PRINT_RCU_DATA(name, func, m) \
+ do { \
+ int _p_r_d_i; \
+ \
+ for_each_possible_cpu(_p_r_d_i) \
+ func(m, &per_cpu(name, _p_r_d_i)); \
+ } while (0)
+
+static int show_rcudata(struct seq_file *m, void *unused)
+{
+ seq_puts(m, "rcu:\n");
+ PRINT_RCU_DATA(rcu_data, print_one_rcu_data, m);
+ seq_puts(m, "rcu_bh:\n");
+ PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data, m);
+ return 0;
+}
+
+static int rcudata_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_rcudata, NULL);
+}
+
+static struct file_operations rcudata_fops = {
+ .owner = THIS_MODULE,
+ .open = rcudata_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
+{
+ if (!rdp->beenonline)
+ return;
+ seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d,%ld,%ld",
+ rdp->cpu,
+ cpu_is_offline(rdp->cpu) ? "\"N\"" : "\"Y\"",
+ rdp->completed, rdp->gpnum,
+ rdp->passed_quiesc, rdp->passed_quiesc_completed,
+ rdp->qs_pending,
+ rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending,
+ rdp->n_rcu_pending);
+#ifdef CONFIG_NO_HZ
+ seq_printf(m, ",%d,%d,%d,%lu",
+ rdp->dynticks->dynticks,
+ rdp->dynticks->dynticks_nesting,
+ rdp->dynticks->dynticks_nmi,
+ rdp->dynticks_fqs);
+#endif /* #ifdef CONFIG_NO_HZ */
+ seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
+ seq_printf(m, ",%ld,%ld\n", rdp->qlen, rdp->blimit);
+}
+
+static int show_rcudata_csv(struct seq_file *m, void *unused)
+{
+ seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"qp\",\"rpfq\",\"rp\",");
+#ifdef CONFIG_NO_HZ
+ seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
+#endif /* #ifdef CONFIG_NO_HZ */
+ seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\"\n");
+ seq_puts(m, "\"rcu:\"\n");
+ PRINT_RCU_DATA(rcu_data, print_one_rcu_data_csv, m);
+ seq_puts(m, "\"rcu_bh:\"\n");
+ PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data_csv, m);
+ return 0;
+}
+
+static int rcudata_csv_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_rcudata_csv, NULL);
+}
+
+static struct file_operations rcudata_csv_fops = {
+ .owner = THIS_MODULE,
+ .open = rcudata_csv_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
+{
+ int level = 0;
+ struct rcu_node *rnp;
+
+ seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x "
+ "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu\n",
+ rsp->completed, rsp->gpnum, rsp->signaled,
+ (long)(rsp->jiffies_force_qs - jiffies),
+ (int)(jiffies & 0xffff),
+ rsp->n_force_qs, rsp->n_force_qs_ngp,
+ rsp->n_force_qs - rsp->n_force_qs_ngp,
+ rsp->n_force_qs_lh);
+ for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) {
+ if (rnp->level != level) {
+ seq_puts(m, "\n");
+ level = rnp->level;
+ }
+ seq_printf(m, "%lx/%lx %d:%d ^%d ",
+ rnp->qsmask, rnp->qsmaskinit,
+ rnp->grplo, rnp->grphi, rnp->grpnum);
+ }
+ seq_puts(m, "\n");
+}
+
+static int show_rcuhier(struct seq_file *m, void *unused)
+{
+ seq_puts(m, "rcu:\n");
+ print_one_rcu_state(m, &rcu_state);
+ seq_puts(m, "rcu_bh:\n");
+ print_one_rcu_state(m, &rcu_bh_state);
+ return 0;
+}
+
+static int rcuhier_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_rcuhier, NULL);
+}
+
+static struct file_operations rcuhier_fops = {
+ .owner = THIS_MODULE,
+ .open = rcuhier_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int show_rcugp(struct seq_file *m, void *unused)
+{
+ seq_printf(m, "rcu: completed=%ld gpnum=%ld\n",
+ rcu_state.completed, rcu_state.gpnum);
+ seq_printf(m, "rcu_bh: completed=%ld gpnum=%ld\n",
+ rcu_bh_state.completed, rcu_bh_state.gpnum);
+ return 0;
+}
+
+static int rcugp_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_rcugp, NULL);
+}
+
+static struct file_operations rcugp_fops = {
+ .owner = THIS_MODULE,
+ .open = rcugp_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *rcudir, *datadir, *datadir_csv, *hierdir, *gpdir;
+static int __init rcuclassic_trace_init(void)
+{
+ rcudir = debugfs_create_dir("rcu", NULL);
+ if (!rcudir)
+ goto out;
+
+ datadir = debugfs_create_file("rcudata", 0444, rcudir,
+ NULL, &rcudata_fops);
+ if (!datadir)
+ goto free_out;
+
+ datadir_csv = debugfs_create_file("rcudata.csv", 0444, rcudir,
+ NULL, &rcudata_csv_fops);
+ if (!datadir_csv)
+ goto free_out;
+
+ gpdir = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops);
+ if (!gpdir)
+ goto free_out;
+
+ hierdir = debugfs_create_file("rcuhier", 0444, rcudir,
+ NULL, &rcuhier_fops);
+ if (!hierdir)
+ goto free_out;
+ return 0;
+free_out:
+ if (datadir)
+ debugfs_remove(datadir);
+ if (datadir_csv)
+ debugfs_remove(datadir_csv);
+ if (gpdir)
+ debugfs_remove(gpdir);
+ debugfs_remove(rcudir);
+out:
+ return 1;
+}
+
+static void __exit rcuclassic_trace_cleanup(void)
+{
+ debugfs_remove(datadir);
+ debugfs_remove(datadir_csv);
+ debugfs_remove(gpdir);
+ debugfs_remove(hierdir);
+ debugfs_remove(rcudir);
+}
+
+
+module_init(rcuclassic_trace_init);
+module_exit(rcuclassic_trace_cleanup);
+
+MODULE_AUTHOR("Paul E. McKenney");
+MODULE_DESCRIPTION("Read-Copy Update tracing for hierarchical implementation");
+MODULE_LICENSE("GPL");
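Note (not part of the patch): once this tracing module is loaded or built in, the entries created above appear as read-only files rcudata, rcudata.csv, rcugp and rcuhier under the rcu/ directory of debugfs, which is conventionally mounted at /sys/kernel/debug; reading them dumps the per-CPU and per-node RCU state formatted by the functions above.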
diff --git a/kernel/resource.c b/kernel/resource.c
index 4337063663ef..e633106b12f6 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -853,6 +853,15 @@ int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
continue;
+ /*
+ * if a resource is "BUSY", it's not a hardware resource
+ * but a driver mapping of such a resource; we don't want
+ * to warn for those; some drivers legitimately map only
+ * partial hardware resources. (example: vesafb)
+ */
+ if (p->flags & IORESOURCE_BUSY)
+ continue;
+
printk(KERN_WARNING "resource map sanity check conflict: "
"0x%llx 0x%llx 0x%llx 0x%llx %s\n",
(unsigned long long)addr,
diff --git a/kernel/sched.c b/kernel/sched.c
index 756d981d91a4..27ba1d642f0f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -209,7 +209,6 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
hrtimer_init(&rt_b->rt_period_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rt_b->rt_period_timer.function = sched_rt_period_timer;
- rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
}
static inline int rt_bandwidth_enabled(void)
@@ -361,7 +360,9 @@ static inline struct task_group *task_group(struct task_struct *p)
struct task_group *tg;
#ifdef CONFIG_USER_SCHED
- tg = p->user->tg;
+ rcu_read_lock();
+ tg = __task_cred(p)->user->tg;
+ rcu_read_unlock();
#elif defined(CONFIG_CGROUP_SCHED)
tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
struct task_group, css);
@@ -610,6 +611,8 @@ struct rq {
#ifdef CONFIG_SCHEDSTATS
/* latency stats */
struct sched_info rq_sched_info;
+ unsigned long long rq_cpu_time;
+ /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
/* sys_sched_yield() stats */
unsigned int yld_exp_empty;
@@ -1143,7 +1146,6 @@ static void init_rq_hrtick(struct rq *rq)
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rq->hrtick_timer.function = hrtick;
- rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
@@ -1871,6 +1873,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
clock_offset = old_rq->clock - new_rq->clock;
+ trace_sched_migrate_task(p, task_cpu(p), new_cpu);
+
#ifdef CONFIG_SCHEDSTATS
if (p->se.wait_start)
p->se.wait_start -= clock_offset;
@@ -2277,6 +2281,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
smp_wmb();
rq = task_rq_lock(p, &flags);
+ update_rq_clock(rq);
old_state = p->state;
if (!(old_state & state))
goto out;
@@ -2334,12 +2339,11 @@ out_activate:
schedstat_inc(p, se.nr_wakeups_local);
else
schedstat_inc(p, se.nr_wakeups_remote);
- update_rq_clock(rq);
activate_task(rq, p, 1);
success = 1;
out_running:
- trace_sched_wakeup(rq, p);
+ trace_sched_wakeup(rq, p, success);
check_preempt_curr(rq, p, sync);
p->state = TASK_RUNNING;
@@ -2472,7 +2476,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
p->sched_class->task_new(rq, p);
inc_nr_running(rq);
}
- trace_sched_wakeup_new(rq, p);
+ trace_sched_wakeup_new(rq, p, 1);
check_preempt_curr(rq, p, 0);
#ifdef CONFIG_SMP
if (p->sched_class->task_wake_up)
@@ -2851,7 +2855,6 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
|| unlikely(!cpu_active(dest_cpu)))
goto out;
- trace_sched_migrate_task(rq, p, dest_cpu);
/* force the process onto the specified CPU */
if (migrate_task(p, dest_cpu, &req)) {
/* Need to wait for migration thread (might exit: take ref). */
@@ -5187,6 +5190,22 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
set_load_weight(p);
}
+/*
+ * check that the target process has a UID that matches the current process's
+ */
+static bool check_same_owner(struct task_struct *p)
+{
+ const struct cred *cred = current_cred(), *pcred;
+ bool match;
+
+ rcu_read_lock();
+ pcred = __task_cred(p);
+ match = (cred->euid == pcred->euid ||
+ cred->euid == pcred->uid);
+ rcu_read_unlock();
+ return match;
+}
+
static int __sched_setscheduler(struct task_struct *p, int policy,
struct sched_param *param, bool user)
{
@@ -5246,8 +5265,7 @@ recheck:
return -EPERM;
/* can't change other user's priorities */
- if ((current->euid != p->euid) &&
- (current->euid != p->uid))
+ if (!check_same_owner(p))
return -EPERM;
}
@@ -5486,8 +5504,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
goto out_free_cpus_allowed;
}
retval = -EPERM;
- if ((current->euid != p->euid) && (current->euid != p->uid) &&
- !capable(CAP_SYS_NICE))
+ if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
goto out_unlock;
retval = security_task_setscheduler(p, 0, NULL);
@@ -9423,6 +9440,41 @@ cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
kfree(ca);
}
+static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
+{
+ u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+ u64 data;
+
+#ifndef CONFIG_64BIT
+ /*
+ * Take rq->lock to make 64-bit read safe on 32-bit platforms.
+ */
+ spin_lock_irq(&cpu_rq(cpu)->lock);
+ data = *cpuusage;
+ spin_unlock_irq(&cpu_rq(cpu)->lock);
+#else
+ data = *cpuusage;
+#endif
+
+ return data;
+}
+
+static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
+{
+ u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+
+#ifndef CONFIG_64BIT
+ /*
+ * Take rq->lock to make 64-bit write safe on 32-bit platforms.
+ */
+ spin_lock_irq(&cpu_rq(cpu)->lock);
+ *cpuusage = val;
+ spin_unlock_irq(&cpu_rq(cpu)->lock);
+#else
+ *cpuusage = val;
+#endif
+}
+
/* return total cpu usage (in nanoseconds) of a group */
static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
{
@@ -9430,17 +9482,8 @@ static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
u64 totalcpuusage = 0;
int i;
- for_each_possible_cpu(i) {
- u64 *cpuusage = percpu_ptr(ca->cpuusage, i);
-
- /*
- * Take rq->lock to make 64-bit addition safe on 32-bit
- * platforms.
- */
- spin_lock_irq(&cpu_rq(i)->lock);
- totalcpuusage += *cpuusage;
- spin_unlock_irq(&cpu_rq(i)->lock);
- }
+ for_each_present_cpu(i)
+ totalcpuusage += cpuacct_cpuusage_read(ca, i);
return totalcpuusage;
}
@@ -9457,23 +9500,39 @@ static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
goto out;
}
- for_each_possible_cpu(i) {
- u64 *cpuusage = percpu_ptr(ca->cpuusage, i);
+ for_each_present_cpu(i)
+ cpuacct_cpuusage_write(ca, i, 0);
- spin_lock_irq(&cpu_rq(i)->lock);
- *cpuusage = 0;
- spin_unlock_irq(&cpu_rq(i)->lock);
- }
out:
return err;
}
+static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
+ struct seq_file *m)
+{
+ struct cpuacct *ca = cgroup_ca(cgroup);
+ u64 percpu;
+ int i;
+
+ for_each_present_cpu(i) {
+ percpu = cpuacct_cpuusage_read(ca, i);
+ seq_printf(m, "%llu ", (unsigned long long) percpu);
+ }
+ seq_printf(m, "\n");
+ return 0;
+}
+
static struct cftype files[] = {
{
.name = "usage",
.read_u64 = cpuusage_read,
.write_u64 = cpuusage_write,
},
+ {
+ .name = "usage_percpu",
+ .read_seq_string = cpuacct_percpu_seq_read,
+ },
+
};
static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 36b5e34fa99e..56c0efe902a7 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -492,6 +492,8 @@ static void update_curr(struct cfs_rq *cfs_rq)
* overflow on 32 bits):
*/
delta_exec = (unsigned long)(now - curr->exec_start);
+ if (!delta_exec)
+ return;
__update_curr(cfs_rq, curr, delta_exec);
curr->exec_start = now;
@@ -1361,12 +1363,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
{
struct task_struct *curr = rq->curr;
struct sched_entity *se = &curr->se, *pse = &p->se;
+ struct cfs_rq *cfs_rq = task_cfs_rq(curr);
- if (unlikely(rt_prio(p->prio))) {
- struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+ update_curr(cfs_rq);
- update_rq_clock(rq);
- update_curr(cfs_rq);
+ if (unlikely(rt_prio(p->prio))) {
resched_task(curr);
return;
}
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 1bbd99014011..833b6d44483c 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -77,7 +77,7 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
}
#define for_each_leaf_rt_rq(rt_rq, rq) \
- list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
+ list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 5fcf0e184586..f2773b5d1226 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -31,7 +31,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
rq->sched_switch, rq->sched_count, rq->sched_goidle,
rq->ttwu_count, rq->ttwu_local,
- rq->rq_sched_info.cpu_time,
+ rq->rq_cpu_time,
rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
seq_printf(seq, "\n");
@@ -124,7 +124,7 @@ static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
if (rq)
- rq->rq_sched_info.cpu_time += delta;
+ rq->rq_cpu_time += delta;
}
static inline void
@@ -237,7 +237,6 @@ static inline void sched_info_depart(struct task_struct *t)
unsigned long long delta = task_rq(t)->clock -
t->sched_info.last_arrival;
- t->sched_info.cpu_time += delta;
rq_sched_info_depart(task_rq(t), delta);
if (t->state == TASK_RUNNING)
diff --git a/kernel/signal.c b/kernel/signal.c
index e9afe63da24b..8e95855ff3cf 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -179,6 +179,11 @@ int next_signal(struct sigpending *pending, sigset_t *mask)
return sig;
}
+/*
+ * allocate a new signal queue record
+ * - this may be called without locks if and only if t == current, otherwise an
+ * appropriate lock must be held to stop the target task from exiting
+ */
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
int override_rlimit)
{
@@ -186,11 +191,12 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
struct user_struct *user;
/*
- * In order to avoid problems with "switch_user()", we want to make
- * sure that the compiler doesn't re-load "t->user"
+ * We won't get problems with the target's UID changing under us
+ * because changing it requires RCU be used, and if t != current, the
+ * caller must be holding the RCU readlock (by way of a spinlock) and
+ * we use RCU protection here
*/
- user = t->user;
- barrier();
+ user = get_uid(__task_cred(t)->user);
atomic_inc(&user->sigpending);
if (override_rlimit ||
atomic_read(&user->sigpending) <=
@@ -198,12 +204,14 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
q = kmem_cache_alloc(sigqueue_cachep, flags);
if (unlikely(q == NULL)) {
atomic_dec(&user->sigpending);
+ free_uid(user);
} else {
INIT_LIST_HEAD(&q->list);
q->flags = 0;
- q->user = get_uid(user);
+ q->user = user;
}
- return(q);
+
+ return q;
}
static void __sigqueue_free(struct sigqueue *q)
@@ -564,10 +572,12 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s)
/*
* Bad permissions for sending the signal
+ * - the caller must hold at least the RCU read lock
*/
static int check_kill_permission(int sig, struct siginfo *info,
struct task_struct *t)
{
+ const struct cred *cred = current_cred(), *tcred;
struct pid *sid;
int error;
@@ -581,8 +591,11 @@ static int check_kill_permission(int sig, struct siginfo *info,
if (error)
return error;
- if ((current->euid ^ t->suid) && (current->euid ^ t->uid) &&
- (current->uid ^ t->suid) && (current->uid ^ t->uid) &&
+ tcred = __task_cred(t);
+ if ((cred->euid ^ tcred->suid) &&
+ (cred->euid ^ tcred->uid) &&
+ (cred->uid ^ tcred->suid) &&
+ (cred->uid ^ tcred->uid) &&
!capable(CAP_KILL)) {
switch (sig) {
case SIGCONT:
@@ -846,7 +859,7 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
q->info.si_errno = 0;
q->info.si_code = SI_USER;
q->info.si_pid = task_pid_vnr(current);
- q->info.si_uid = current->uid;
+ q->info.si_uid = current_uid();
break;
case (unsigned long) SEND_SIG_PRIV:
q->info.si_signo = sig;
@@ -1010,6 +1023,10 @@ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long
return sighand;
}
+/*
+ * send signal info to all the members of a group
+ * - the caller must hold the RCU read lock at least
+ */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
unsigned long flags;
@@ -1031,8 +1048,8 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
/*
* __kill_pgrp_info() sends a signal to a process group: this is what the tty
* control characters do (^C, ^Z etc)
+ * - the caller must hold at least a readlock on tasklist_lock
*/
-
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
struct task_struct *p = NULL;
@@ -1088,6 +1105,7 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
{
int ret = -EINVAL;
struct task_struct *p;
+ const struct cred *pcred;
if (!valid_signal(sig))
return ret;
@@ -1098,9 +1116,11 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
ret = -ESRCH;
goto out_unlock;
}
- if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
- && (euid != p->suid) && (euid != p->uid)
- && (uid != p->suid) && (uid != p->uid)) {
+ pcred = __task_cred(p);
+ if ((info == SEND_SIG_NOINFO ||
+ (!is_si_special(info) && SI_FROMUSER(info))) &&
+ euid != pcred->suid && euid != pcred->uid &&
+ uid != pcred->suid && uid != pcred->uid) {
ret = -EPERM;
goto out_unlock;
}
@@ -1371,10 +1391,9 @@ int do_notify_parent(struct task_struct *tsk, int sig)
*/
rcu_read_lock();
info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
+ info.si_uid = __task_cred(tsk)->uid;
rcu_read_unlock();
- info.si_uid = tsk->uid;
-
thread_group_cputime(tsk, &cputime);
info.si_utime = cputime_to_jiffies(cputime.utime);
info.si_stime = cputime_to_jiffies(cputime.stime);
@@ -1442,10 +1461,9 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
*/
rcu_read_lock();
info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
+ info.si_uid = __task_cred(tsk)->uid;
rcu_read_unlock();
- info.si_uid = tsk->uid;
-
info.si_utime = cputime_to_clock_t(tsk->utime);
info.si_stime = cputime_to_clock_t(tsk->stime);
@@ -1600,7 +1618,7 @@ void ptrace_notify(int exit_code)
info.si_signo = SIGTRAP;
info.si_code = exit_code;
info.si_pid = task_pid_vnr(current);
- info.si_uid = current->uid;
+ info.si_uid = current_uid();
/* Let the debugger run. */
spin_lock_irq(&current->sighand->siglock);
@@ -1712,7 +1730,7 @@ static int ptrace_signal(int signr, siginfo_t *info,
info->si_errno = 0;
info->si_code = SI_USER;
info->si_pid = task_pid_vnr(current->parent);
- info->si_uid = current->parent->uid;
+ info->si_uid = task_uid(current->parent);
}
/* If the (new) signal is now blocked, requeue it. */
@@ -2213,7 +2231,7 @@ sys_kill(pid_t pid, int sig)
info.si_errno = 0;
info.si_code = SI_USER;
info.si_pid = task_tgid_vnr(current);
- info.si_uid = current->uid;
+ info.si_uid = current_uid();
return kill_something_info(sig, &info, pid);
}
@@ -2230,7 +2248,7 @@ static int do_tkill(pid_t tgid, pid_t pid, int sig)
info.si_errno = 0;
info.si_code = SI_TKILL;
info.si_pid = task_tgid_vnr(current);
- info.si_uid = current->uid;
+ info.si_uid = current_uid();
rcu_read_lock();
p = find_task_by_vpid(pid);
diff --git a/kernel/smp.c b/kernel/smp.c
index 75c8dde58c55..5cfa0e5e3e88 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -24,8 +24,8 @@ struct call_function_data {
struct call_single_data csd;
spinlock_t lock;
unsigned int refs;
- cpumask_t cpumask;
struct rcu_head rcu_head;
+ unsigned long cpumask_bits[];
};
struct call_single_queue {
@@ -110,13 +110,13 @@ void generic_smp_call_function_interrupt(void)
list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
int refs;
- if (!cpu_isset(cpu, data->cpumask))
+ if (!cpumask_test_cpu(cpu, to_cpumask(data->cpumask_bits)))
continue;
data->csd.func(data->csd.info);
spin_lock(&data->lock);
- cpu_clear(cpu, data->cpumask);
+ cpumask_clear_cpu(cpu, to_cpumask(data->cpumask_bits));
WARN_ON(data->refs == 0);
data->refs--;
refs = data->refs;
@@ -223,7 +223,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
local_irq_save(flags);
func(info);
local_irq_restore(flags);
- } else if ((unsigned)cpu < NR_CPUS && cpu_online(cpu)) {
+ } else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
struct call_single_data *data = NULL;
if (!wait) {
@@ -266,51 +266,19 @@ void __smp_call_function_single(int cpu, struct call_single_data *data)
generic_exec_single(cpu, data);
}
-/* Dummy function */
-static void quiesce_dummy(void *unused)
-{
-}
-
-/*
- * Ensure stack based data used in call function mask is safe to free.
- *
- * This is needed by smp_call_function_mask when using on-stack data, because
- * a single call function queue is shared by all CPUs, and any CPU may pick up
- * the data item on the queue at any time before it is deleted. So we need to
- * ensure that all CPUs have transitioned through a quiescent state after
- * this call.
- *
- * This is a very slow function, implemented by sending synchronous IPIs to
- * all possible CPUs. For this reason, we have to alloc data rather than use
- * stack based data even in the case of synchronous calls. The stack based
- * data is then just used for deadlock/oom fallback which will be very rare.
- *
- * If a faster scheme can be made, we could go back to preferring stack based
- * data -- the data allocation/free is non-zero cost.
- */
-static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
-{
- struct call_single_data data;
- int cpu;
-
- data.func = quiesce_dummy;
- data.info = NULL;
-
- for_each_cpu_mask(cpu, mask) {
- data.flags = CSD_FLAG_WAIT;
- generic_exec_single(cpu, &data);
- }
-}
+/* FIXME: Shim for archs using old arch_send_call_function_ipi API. */
+#ifndef arch_send_call_function_ipi_mask
+#define arch_send_call_function_ipi_mask(maskp) \
+ arch_send_call_function_ipi(*(maskp))
+#endif
/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on.
+ * smp_call_function_many(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on (only runs on online subset).
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
* @wait: If true, wait (atomically) until function has completed on other CPUs.
*
- * Returns 0 on success, else a negative status code.
- *
* If @wait is true, then returns once @func has returned. Note that @wait
* will be implicitly turned on in case of allocation failures, since
* we fall back to on-stack allocation.
@@ -319,53 +287,57 @@ static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
* hardware interrupt handler or from a bottom half handler. Preemption
* must be disabled when calling this function.
*/
-int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
- int wait)
+void smp_call_function_many(const struct cpumask *mask,
+ void (*func)(void *), void *info,
+ bool wait)
{
- struct call_function_data d;
- struct call_function_data *data = NULL;
- cpumask_t allbutself;
+ struct call_function_data *data;
unsigned long flags;
- int cpu, num_cpus;
- int slowpath = 0;
+ int cpu, next_cpu;
/* Can deadlock when called with interrupts disabled */
WARN_ON(irqs_disabled());
- cpu = smp_processor_id();
- allbutself = cpu_online_map;
- cpu_clear(cpu, allbutself);
- cpus_and(mask, mask, allbutself);
- num_cpus = cpus_weight(mask);
-
- /*
- * If zero CPUs, return. If just a single CPU, turn this request
- * into a targetted single call instead since it's faster.
- */
- if (!num_cpus)
- return 0;
- else if (num_cpus == 1) {
- cpu = first_cpu(mask);
- return smp_call_function_single(cpu, func, info, wait);
+ /* So, what's a CPU they want? Ignoring this one. */
+ cpu = cpumask_first_and(mask, cpu_online_mask);
+ if (cpu == smp_processor_id())
+ cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
+ /* No online cpus? We're done. */
+ if (cpu >= nr_cpu_ids)
+ return;
+
+ /* Do we have another CPU which isn't us? */
+ next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
+ if (next_cpu == smp_processor_id())
+ next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
+
+ /* Fastpath: do that cpu by itself. */
+ if (next_cpu >= nr_cpu_ids) {
+ smp_call_function_single(cpu, func, info, wait);
+ return;
}
- data = kmalloc(sizeof(*data), GFP_ATOMIC);
- if (data) {
- data->csd.flags = CSD_FLAG_ALLOC;
- if (wait)
- data->csd.flags |= CSD_FLAG_WAIT;
- } else {
- data = &d;
- data->csd.flags = CSD_FLAG_WAIT;
- wait = 1;
- slowpath = 1;
+ data = kmalloc(sizeof(*data) + cpumask_size(), GFP_ATOMIC);
+ if (unlikely(!data)) {
+ /* Slow path. */
+ for_each_online_cpu(cpu) {
+ if (cpu == smp_processor_id())
+ continue;
+ if (cpumask_test_cpu(cpu, mask))
+ smp_call_function_single(cpu, func, info, wait);
+ }
+ return;
}
spin_lock_init(&data->lock);
+ data->csd.flags = CSD_FLAG_ALLOC;
+ if (wait)
+ data->csd.flags |= CSD_FLAG_WAIT;
data->csd.func = func;
data->csd.info = info;
- data->refs = num_cpus;
- data->cpumask = mask;
+ cpumask_and(to_cpumask(data->cpumask_bits), mask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), to_cpumask(data->cpumask_bits));
+ data->refs = cpumask_weight(to_cpumask(data->cpumask_bits));
spin_lock_irqsave(&call_function_lock, flags);
list_add_tail_rcu(&data->csd.list, &call_function_queue);
@@ -377,18 +349,13 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
smp_mb();
/* Send a message to all CPUs in the map */
- arch_send_call_function_ipi(mask);
+ arch_send_call_function_ipi_mask(to_cpumask(data->cpumask_bits));
/* optionally wait for the CPUs to complete */
- if (wait) {
+ if (wait)
csd_flag_wait(&data->csd);
- if (unlikely(slowpath))
- smp_call_function_mask_quiesce_stack(mask);
- }
-
- return 0;
}
-EXPORT_SYMBOL(smp_call_function_mask);
+EXPORT_SYMBOL(smp_call_function_many);
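For reference, a minimal sketch of how a caller might use the new interface (illustrative only, not part of this patch; do_remote_work() and kick_other_cpus() are hypothetical names). As the comment above notes, preemption must be disabled around the call, and it must not be made from hardware or softirq context with @wait set.

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/preempt.h>

/* Runs in IPI context on each selected CPU; must be fast and must not sleep. */
static void do_remote_work(void *info)
{
	(void)info;
}

static void kick_other_cpus(const struct cpumask *work_mask)
{
	preempt_disable();
	/* Runs do_remote_work() on every online CPU in work_mask except us,
	 * and waits for all of them to finish. */
	smp_call_function_many(work_mask, do_remote_work, NULL, true);
	preempt_enable();
}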
/**
* smp_call_function(): Run a function on all other CPUs.
@@ -396,7 +363,7 @@ EXPORT_SYMBOL(smp_call_function_mask);
* @info: An arbitrary pointer to pass to the function.
* @wait: If true, wait (atomically) until function has completed on other CPUs.
*
- * Returns 0 on success, else a negative status code.
+ * Returns 0.
*
* If @wait is true, then returns once @func has returned; otherwise
* it returns just before the target cpu calls @func. In case of allocation
@@ -407,12 +374,10 @@ EXPORT_SYMBOL(smp_call_function_mask);
*/
int smp_call_function(void (*func)(void *), void *info, int wait)
{
- int ret;
-
preempt_disable();
- ret = smp_call_function_mask(cpu_online_map, func, info, wait);
+ smp_call_function_many(cpu_online_mask, func, info, wait);
preempt_enable();
- return ret;
+ return 0;
}
EXPORT_SYMBOL(smp_call_function);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index e7c69a720d69..b7568d7def23 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -102,20 +102,6 @@ void local_bh_disable(void)
EXPORT_SYMBOL(local_bh_disable);
-void __local_bh_enable(void)
-{
- WARN_ON_ONCE(in_irq());
-
- /*
- * softirqs should never be enabled by __local_bh_enable(),
- * it always nests inside local_bh_enable() sections:
- */
- WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);
-
- sub_preempt_count(SOFTIRQ_OFFSET);
-}
-EXPORT_SYMBOL_GPL(__local_bh_enable);
-
/*
* Special-case - softirqs can safely be enabled in
* cond_resched_softirq(), or by __do_softirq(),
@@ -269,6 +255,7 @@ void irq_enter(void)
{
int cpu = smp_processor_id();
+ rcu_irq_enter();
if (idle_cpu(cpu) && !in_interrupt()) {
__irq_enter();
tick_check_idle(cpu);
@@ -295,9 +282,9 @@ void irq_exit(void)
#ifdef CONFIG_NO_HZ
/* Make sure that timer wheel updates are propagated */
- if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
- tick_nohz_stop_sched_tick(0);
rcu_irq_exit();
+ if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
+ tick_nohz_stop_sched_tick(0);
#endif
preempt_enable_no_resched();
}
@@ -746,7 +733,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
break;
/* Unbind so it can run. Fall thru. */
kthread_bind(per_cpu(ksoftirqd, hotcpu),
- any_online_cpu(cpu_online_map));
+ cpumask_any(cpu_online_mask));
case CPU_DEAD:
case CPU_DEAD_FROZEN: {
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 1ab790c67b17..d9188c66278a 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -303,17 +303,15 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- check_cpu = any_online_cpu(cpu_online_map);
+ check_cpu = cpumask_any(cpu_online_mask);
wake_up_process(per_cpu(watchdog_task, hotcpu));
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
if (hotcpu == check_cpu) {
- cpumask_t temp_cpu_online_map = cpu_online_map;
-
- cpu_clear(hotcpu, temp_cpu_online_map);
- check_cpu = any_online_cpu(temp_cpu_online_map);
+ /* Pick any other online cpu. */
+ check_cpu = cpumask_any_but(cpu_online_mask, hotcpu);
}
break;
@@ -323,7 +321,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
break;
/* Unbind so it can run. Fall thru. */
kthread_bind(per_cpu(watchdog_task, hotcpu),
- any_online_cpu(cpu_online_map));
+ cpumask_any(cpu_online_mask));
case CPU_DEAD:
case CPU_DEAD_FROZEN:
p = per_cpu(watchdog_task, hotcpu);
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
index 94b527ef1d1e..eb212f8f8bc8 100644
--- a/kernel/stacktrace.c
+++ b/kernel/stacktrace.c
@@ -6,6 +6,7 @@
* Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*/
#include <linux/sched.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
@@ -24,3 +25,13 @@ void print_stack_trace(struct stack_trace *trace, int spaces)
}
EXPORT_SYMBOL_GPL(print_stack_trace);
+/*
+ * Architectures that do not implement save_stack_trace_tsk get this
+ * weak alias and a once-per-bootup warning (whenever this facility
+ * is utilized - for example by procfs):
+ */
+__weak void
+save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk() not implemented yet.\n");
+}
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 24e8ceacc388..286c41722e8c 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -69,10 +69,10 @@ static void stop_cpu(struct work_struct *unused)
int err;
if (!active_cpus) {
- if (cpu == first_cpu(cpu_online_map))
+ if (cpu == cpumask_first(cpu_online_mask))
smdata = &active;
} else {
- if (cpu_isset(cpu, *active_cpus))
+ if (cpumask_test_cpu(cpu, active_cpus))
smdata = &active;
}
/* Simple state machine */
@@ -109,7 +109,7 @@ static int chill(void *unused)
return 0;
}
-int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
+int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
struct work_struct *sm_work;
int i, ret;
@@ -142,7 +142,7 @@ int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
return ret;
}
-int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
+int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
int ret;
diff --git a/kernel/sys.c b/kernel/sys.c
index 5fc3a0cfb994..d356d79e84ac 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -112,12 +112,17 @@ EXPORT_SYMBOL(cad_pid);
void (*pm_power_off_prepare)(void);
+/*
+ * set the priority of a task
+ * - the caller must hold the RCU read lock
+ */
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
+ const struct cred *cred = current_cred(), *pcred = __task_cred(p);
int no_nice;
- if (p->uid != current->euid &&
- p->euid != current->euid && !capable(CAP_SYS_NICE)) {
+ if (pcred->uid != cred->euid &&
+ pcred->euid != cred->euid && !capable(CAP_SYS_NICE)) {
error = -EPERM;
goto out;
}
@@ -141,6 +146,7 @@ asmlinkage long sys_setpriority(int which, int who, int niceval)
{
struct task_struct *g, *p;
struct user_struct *user;
+ const struct cred *cred = current_cred();
int error = -EINVAL;
struct pid *pgrp;
@@ -174,18 +180,18 @@ asmlinkage long sys_setpriority(int which, int who, int niceval)
} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
break;
case PRIO_USER:
- user = current->user;
+ user = (struct user_struct *) cred->user;
if (!who)
- who = current->uid;
- else
- if ((who != current->uid) && !(user = find_user(who)))
- goto out_unlock; /* No processes for this user */
+ who = cred->uid;
+ else if ((who != cred->uid) &&
+ !(user = find_user(who)))
+ goto out_unlock; /* No processes for this user */
do_each_thread(g, p)
- if (p->uid == who)
+ if (__task_cred(p)->uid == who)
error = set_one_prio(p, niceval, error);
while_each_thread(g, p);
- if (who != current->uid)
+ if (who != cred->uid)
free_uid(user); /* For find_user() */
break;
}
@@ -205,6 +211,7 @@ asmlinkage long sys_getpriority(int which, int who)
{
struct task_struct *g, *p;
struct user_struct *user;
+ const struct cred *cred = current_cred();
long niceval, retval = -ESRCH;
struct pid *pgrp;
@@ -236,21 +243,21 @@ asmlinkage long sys_getpriority(int which, int who)
} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
break;
case PRIO_USER:
- user = current->user;
+ user = (struct user_struct *) cred->user;
if (!who)
- who = current->uid;
- else
- if ((who != current->uid) && !(user = find_user(who)))
- goto out_unlock; /* No processes for this user */
+ who = cred->uid;
+ else if ((who != cred->uid) &&
+ !(user = find_user(who)))
+ goto out_unlock; /* No processes for this user */
do_each_thread(g, p)
- if (p->uid == who) {
+ if (__task_cred(p)->uid == who) {
niceval = 20 - task_nice(p);
if (niceval > retval)
retval = niceval;
}
while_each_thread(g, p);
- if (who != current->uid)
+ if (who != cred->uid)
free_uid(user); /* for find_user() */
break;
}
@@ -472,46 +479,48 @@ void ctrl_alt_del(void)
*/
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
- int old_rgid = current->gid;
- int old_egid = current->egid;
- int new_rgid = old_rgid;
- int new_egid = old_egid;
+ const struct cred *old;
+ struct cred *new;
int retval;
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+ old = current_cred();
+
retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
if (retval)
- return retval;
+ goto error;
+ retval = -EPERM;
if (rgid != (gid_t) -1) {
- if ((old_rgid == rgid) ||
- (current->egid==rgid) ||
+ if (old->gid == rgid ||
+ old->egid == rgid ||
capable(CAP_SETGID))
- new_rgid = rgid;
+ new->gid = rgid;
else
- return -EPERM;
+ goto error;
}
if (egid != (gid_t) -1) {
- if ((old_rgid == egid) ||
- (current->egid == egid) ||
- (current->sgid == egid) ||
+ if (old->gid == egid ||
+ old->egid == egid ||
+ old->sgid == egid ||
capable(CAP_SETGID))
- new_egid = egid;
+ new->egid = egid;
else
- return -EPERM;
- }
- if (new_egid != old_egid) {
- set_dumpable(current->mm, suid_dumpable);
- smp_wmb();
+ goto error;
}
+
if (rgid != (gid_t) -1 ||
- (egid != (gid_t) -1 && egid != old_rgid))
- current->sgid = new_egid;
- current->fsgid = new_egid;
- current->egid = new_egid;
- current->gid = new_rgid;
- key_fsgid_changed(current);
- proc_id_connector(current, PROC_EVENT_GID);
- return 0;
+ (egid != (gid_t) -1 && egid != old->gid))
+ new->sgid = new->egid;
+ new->fsgid = new->egid;
+
+ return commit_creds(new);
+
+error:
+ abort_creds(new);
+ return retval;
}
/*
@@ -521,56 +530,54 @@ asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
*/
asmlinkage long sys_setgid(gid_t gid)
{
- int old_egid = current->egid;
+ const struct cred *old;
+ struct cred *new;
int retval;
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+ old = current_cred();
+
retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
if (retval)
- return retval;
+ goto error;
- if (capable(CAP_SETGID)) {
- if (old_egid != gid) {
- set_dumpable(current->mm, suid_dumpable);
- smp_wmb();
- }
- current->gid = current->egid = current->sgid = current->fsgid = gid;
- } else if ((gid == current->gid) || (gid == current->sgid)) {
- if (old_egid != gid) {
- set_dumpable(current->mm, suid_dumpable);
- smp_wmb();
- }
- current->egid = current->fsgid = gid;
- }
+ retval = -EPERM;
+ if (capable(CAP_SETGID))
+ new->gid = new->egid = new->sgid = new->fsgid = gid;
+ else if (gid == old->gid || gid == old->sgid)
+ new->egid = new->fsgid = gid;
else
- return -EPERM;
+ goto error;
- key_fsgid_changed(current);
- proc_id_connector(current, PROC_EVENT_GID);
- return 0;
+ return commit_creds(new);
+
+error:
+ abort_creds(new);
+ return retval;
}
-static int set_user(uid_t new_ruid, int dumpclear)
+/*
+ * change the user struct in a credentials set to match the new UID
+ */
+static int set_user(struct cred *new)
{
struct user_struct *new_user;
- new_user = alloc_uid(current->nsproxy->user_ns, new_ruid);
+ new_user = alloc_uid(current_user_ns(), new->uid);
if (!new_user)
return -EAGAIN;
if (atomic_read(&new_user->processes) >=
current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
- new_user != current->nsproxy->user_ns->root_user) {
+ new_user != INIT_USER) {
free_uid(new_user);
return -EAGAIN;
}
- switch_uid(new_user);
-
- if (dumpclear) {
- set_dumpable(current->mm, suid_dumpable);
- smp_wmb();
- }
- current->uid = new_ruid;
+ free_uid(new->user);
+ new->user = new_user;
return 0;
}
@@ -591,54 +598,56 @@ static int set_user(uid_t new_ruid, int dumpclear)
*/
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
- int old_ruid, old_euid, old_suid, new_ruid, new_euid;
+ const struct cred *old;
+ struct cred *new;
int retval;
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+ old = current_cred();
+
retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
if (retval)
- return retval;
-
- new_ruid = old_ruid = current->uid;
- new_euid = old_euid = current->euid;
- old_suid = current->suid;
+ goto error;
+ retval = -EPERM;
if (ruid != (uid_t) -1) {
- new_ruid = ruid;
- if ((old_ruid != ruid) &&
- (current->euid != ruid) &&
+ new->uid = ruid;
+ if (old->uid != ruid &&
+ old->euid != ruid &&
!capable(CAP_SETUID))
- return -EPERM;
+ goto error;
}
if (euid != (uid_t) -1) {
- new_euid = euid;
- if ((old_ruid != euid) &&
- (current->euid != euid) &&
- (current->suid != euid) &&
+ new->euid = euid;
+ if (old->uid != euid &&
+ old->euid != euid &&
+ old->suid != euid &&
!capable(CAP_SETUID))
- return -EPERM;
+ goto error;
}
- if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
- return -EAGAIN;
+ retval = -EAGAIN;
+ if (new->uid != old->uid && set_user(new) < 0)
+ goto error;
- if (new_euid != old_euid) {
- set_dumpable(current->mm, suid_dumpable);
- smp_wmb();
- }
- current->fsuid = current->euid = new_euid;
if (ruid != (uid_t) -1 ||
- (euid != (uid_t) -1 && euid != old_ruid))
- current->suid = current->euid;
- current->fsuid = current->euid;
+ (euid != (uid_t) -1 && euid != old->uid))
+ new->suid = new->euid;
+ new->fsuid = new->euid;
- key_fsuid_changed(current);
- proc_id_connector(current, PROC_EVENT_UID);
-
- return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
-}
+ retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
+ if (retval < 0)
+ goto error;
+ return commit_creds(new);
+error:
+ abort_creds(new);
+ return retval;
+}
/*
* setuid() is implemented like SysV with SAVED_IDS
@@ -653,36 +662,41 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
*/
asmlinkage long sys_setuid(uid_t uid)
{
- int old_euid = current->euid;
- int old_ruid, old_suid, new_suid;
+ const struct cred *old;
+ struct cred *new;
int retval;
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+ old = current_cred();
+
retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
if (retval)
- return retval;
+ goto error;
- old_ruid = current->uid;
- old_suid = current->suid;
- new_suid = old_suid;
-
+ retval = -EPERM;
if (capable(CAP_SETUID)) {
- if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
- return -EAGAIN;
- new_suid = uid;
- } else if ((uid != current->uid) && (uid != new_suid))
- return -EPERM;
-
- if (old_euid != uid) {
- set_dumpable(current->mm, suid_dumpable);
- smp_wmb();
+ new->suid = new->uid = uid;
+ if (uid != old->uid && set_user(new) < 0) {
+ retval = -EAGAIN;
+ goto error;
+ }
+ } else if (uid != old->uid && uid != new->suid) {
+ goto error;
}
- current->fsuid = current->euid = uid;
- current->suid = new_suid;
- key_fsuid_changed(current);
- proc_id_connector(current, PROC_EVENT_UID);
+ new->fsuid = new->euid = uid;
+
+ retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
+ if (retval < 0)
+ goto error;
- return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
+ return commit_creds(new);
+
+error:
+ abort_creds(new);
+ return retval;
}
@@ -692,54 +706,63 @@ asmlinkage long sys_setuid(uid_t uid)
*/
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
- int old_ruid = current->uid;
- int old_euid = current->euid;
- int old_suid = current->suid;
+ const struct cred *old;
+ struct cred *new;
int retval;
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
if (retval)
- return retval;
+ goto error;
+ old = current_cred();
+ retval = -EPERM;
if (!capable(CAP_SETUID)) {
- if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
- (ruid != current->euid) && (ruid != current->suid))
- return -EPERM;
- if ((euid != (uid_t) -1) && (euid != current->uid) &&
- (euid != current->euid) && (euid != current->suid))
- return -EPERM;
- if ((suid != (uid_t) -1) && (suid != current->uid) &&
- (suid != current->euid) && (suid != current->suid))
- return -EPERM;
+ if (ruid != (uid_t) -1 && ruid != old->uid &&
+ ruid != old->euid && ruid != old->suid)
+ goto error;
+ if (euid != (uid_t) -1 && euid != old->uid &&
+ euid != old->euid && euid != old->suid)
+ goto error;
+ if (suid != (uid_t) -1 && suid != old->uid &&
+ suid != old->euid && suid != old->suid)
+ goto error;
}
+
+ retval = -EAGAIN;
if (ruid != (uid_t) -1) {
- if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
- return -EAGAIN;
+ new->uid = ruid;
+ if (ruid != old->uid && set_user(new) < 0)
+ goto error;
}
- if (euid != (uid_t) -1) {
- if (euid != current->euid) {
- set_dumpable(current->mm, suid_dumpable);
- smp_wmb();
- }
- current->euid = euid;
- }
- current->fsuid = current->euid;
+ if (euid != (uid_t) -1)
+ new->euid = euid;
if (suid != (uid_t) -1)
- current->suid = suid;
+ new->suid = suid;
+ new->fsuid = new->euid;
+
+ retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
+ if (retval < 0)
+ goto error;
- key_fsuid_changed(current);
- proc_id_connector(current, PROC_EVENT_UID);
+ return commit_creds(new);
- return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
+error:
+ abort_creds(new);
+ return retval;
}
asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
{
+ const struct cred *cred = current_cred();
int retval;
- if (!(retval = put_user(current->uid, ruid)) &&
- !(retval = put_user(current->euid, euid)))
- retval = put_user(current->suid, suid);
+ if (!(retval = put_user(cred->uid, ruid)) &&
+ !(retval = put_user(cred->euid, euid)))
+ retval = put_user(cred->suid, suid);
return retval;
}
@@ -749,48 +772,55 @@ asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __us
*/
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
+ const struct cred *old;
+ struct cred *new;
int retval;
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+ old = current_cred();
+
retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
if (retval)
- return retval;
+ goto error;
+ retval = -EPERM;
if (!capable(CAP_SETGID)) {
- if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
- (rgid != current->egid) && (rgid != current->sgid))
- return -EPERM;
- if ((egid != (gid_t) -1) && (egid != current->gid) &&
- (egid != current->egid) && (egid != current->sgid))
- return -EPERM;
- if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
- (sgid != current->egid) && (sgid != current->sgid))
- return -EPERM;
+ if (rgid != (gid_t) -1 && rgid != old->gid &&
+ rgid != old->egid && rgid != old->sgid)
+ goto error;
+ if (egid != (gid_t) -1 && egid != old->gid &&
+ egid != old->egid && egid != old->sgid)
+ goto error;
+ if (sgid != (gid_t) -1 && sgid != old->gid &&
+ sgid != old->egid && sgid != old->sgid)
+ goto error;
}
- if (egid != (gid_t) -1) {
- if (egid != current->egid) {
- set_dumpable(current->mm, suid_dumpable);
- smp_wmb();
- }
- current->egid = egid;
- }
- current->fsgid = current->egid;
+
if (rgid != (gid_t) -1)
- current->gid = rgid;
+ new->gid = rgid;
+ if (egid != (gid_t) -1)
+ new->egid = egid;
if (sgid != (gid_t) -1)
- current->sgid = sgid;
+ new->sgid = sgid;
+ new->fsgid = new->egid;
- key_fsgid_changed(current);
- proc_id_connector(current, PROC_EVENT_GID);
- return 0;
+ return commit_creds(new);
+
+error:
+ abort_creds(new);
+ return retval;
}
asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
{
+ const struct cred *cred = current_cred();
int retval;
- if (!(retval = put_user(current->gid, rgid)) &&
- !(retval = put_user(current->egid, egid)))
- retval = put_user(current->sgid, sgid);
+ if (!(retval = put_user(cred->gid, rgid)) &&
+ !(retval = put_user(cred->egid, egid)))
+ retval = put_user(cred->sgid, sgid);
return retval;
}
@@ -804,27 +834,35 @@ asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __us
*/
asmlinkage long sys_setfsuid(uid_t uid)
{
- int old_fsuid;
+ const struct cred *old;
+ struct cred *new;
+ uid_t old_fsuid;
- old_fsuid = current->fsuid;
- if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
- return old_fsuid;
+ new = prepare_creds();
+ if (!new)
+ return current_fsuid();
+ old = current_cred();
+ old_fsuid = old->fsuid;
- if (uid == current->uid || uid == current->euid ||
- uid == current->suid || uid == current->fsuid ||
+ if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
+ goto error;
+
+ if (uid == old->uid || uid == old->euid ||
+ uid == old->suid || uid == old->fsuid ||
capable(CAP_SETUID)) {
if (uid != old_fsuid) {
- set_dumpable(current->mm, suid_dumpable);
- smp_wmb();
+ new->fsuid = uid;
+ if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
+ goto change_okay;
}
- current->fsuid = uid;
}
- key_fsuid_changed(current);
- proc_id_connector(current, PROC_EVENT_UID);
-
- security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);
+error:
+ abort_creds(new);
+ return old_fsuid;
+change_okay:
+ commit_creds(new);
return old_fsuid;
}
@@ -833,23 +871,34 @@ asmlinkage long sys_setfsuid(uid_t uid)
*/
asmlinkage long sys_setfsgid(gid_t gid)
{
- int old_fsgid;
+ const struct cred *old;
+ struct cred *new;
+ gid_t old_fsgid;
+
+ new = prepare_creds();
+ if (!new)
+ return current_fsgid();
+ old = current_cred();
+ old_fsgid = old->fsgid;
- old_fsgid = current->fsgid;
if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
- return old_fsgid;
+ goto error;
- if (gid == current->gid || gid == current->egid ||
- gid == current->sgid || gid == current->fsgid ||
+ if (gid == old->gid || gid == old->egid ||
+ gid == old->sgid || gid == old->fsgid ||
capable(CAP_SETGID)) {
if (gid != old_fsgid) {
- set_dumpable(current->mm, suid_dumpable);
- smp_wmb();
+ new->fsgid = gid;
+ goto change_okay;
}
- current->fsgid = gid;
- key_fsgid_changed(current);
- proc_id_connector(current, PROC_EVENT_GID);
}
+
+error:
+ abort_creds(new);
+ return old_fsgid;
+
+change_okay:
+ commit_creds(new);
return old_fsgid;
}
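
All of the setuid/setgid-family syscalls above now follow the same three-step credentials pattern: prepare a private copy, mutate the copy, then commit it (or abort it on any error path). The fragment below is a minimal sketch of that pattern only; the function name example_change_fsuid and its validation check are hypothetical and not part of this patch, and real callers also run the relevant security_task_* hooks as the hunks above do.

#include <linux/cred.h>

static int example_change_fsuid(uid_t uid)
{
	struct cred *new;

	new = prepare_creds();		/* private copy of current's credentials */
	if (!new)
		return -ENOMEM;

	if (uid == (uid_t)-1) {		/* placeholder validation */
		abort_creds(new);	/* drop the unused copy */
		return -EINVAL;
	}

	new->fsuid = uid;		/* mutate only the copy */
	return commit_creds(new);	/* install it for current */
}
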
@@ -1118,7 +1167,7 @@ EXPORT_SYMBOL(groups_free);
/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
- struct group_info *group_info)
+ const struct group_info *group_info)
{
int i;
unsigned int count = group_info->ngroups;
@@ -1186,7 +1235,7 @@ static void groups_sort(struct group_info *group_info)
}
/* a simple bsearch */
-int groups_search(struct group_info *group_info, gid_t grp)
+int groups_search(const struct group_info *group_info, gid_t grp)
{
unsigned int left, right;
@@ -1208,51 +1257,74 @@ int groups_search(struct group_info *group_info, gid_t grp)
return 0;
}
-/* validate and set current->group_info */
-int set_current_groups(struct group_info *group_info)
+/**
+ * set_groups - Change a group subscription in a set of credentials
+ * @new: The newly prepared set of credentials to alter
+ * @group_info: The group list to install
+ *
+ * Validate a group subscription and, if valid, insert it into a set
+ * of credentials.
+ */
+int set_groups(struct cred *new, struct group_info *group_info)
{
int retval;
- struct group_info *old_info;
retval = security_task_setgroups(group_info);
if (retval)
return retval;
+ put_group_info(new->group_info);
groups_sort(group_info);
get_group_info(group_info);
+ new->group_info = group_info;
+ return 0;
+}
+
+EXPORT_SYMBOL(set_groups);
- task_lock(current);
- old_info = current->group_info;
- current->group_info = group_info;
- task_unlock(current);
+/**
+ * set_current_groups - Change current's group subscription
+ * @group_info: The group list to impose
+ *
+ * Validate a group subscription and, if valid, impose it upon current's task
+ * security record.
+ */
+int set_current_groups(struct group_info *group_info)
+{
+ struct cred *new;
+ int ret;
- put_group_info(old_info);
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
- return 0;
+ ret = set_groups(new, group_info);
+ if (ret < 0) {
+ abort_creds(new);
+ return ret;
+ }
+
+ return commit_creds(new);
}
EXPORT_SYMBOL(set_current_groups);
asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
{
- int i = 0;
-
- /*
- * SMP: Nobody else can change our grouplist. Thus we are
- * safe.
- */
+ const struct cred *cred = current_cred();
+ int i;
if (gidsetsize < 0)
return -EINVAL;
/* no need to grab task_lock here; it cannot change */
- i = current->group_info->ngroups;
+ i = cred->group_info->ngroups;
if (gidsetsize) {
if (i > gidsetsize) {
i = -EINVAL;
goto out;
}
- if (groups_to_user(grouplist, current->group_info)) {
+ if (groups_to_user(grouplist, cred->group_info)) {
i = -EFAULT;
goto out;
}
@@ -1296,9 +1368,11 @@ asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
*/
int in_group_p(gid_t grp)
{
+ const struct cred *cred = current_cred();
int retval = 1;
- if (grp != current->fsgid)
- retval = groups_search(current->group_info, grp);
+
+ if (grp != cred->fsgid)
+ retval = groups_search(cred->group_info, grp);
return retval;
}
@@ -1306,9 +1380,11 @@ EXPORT_SYMBOL(in_group_p);
int in_egroup_p(gid_t grp)
{
+ const struct cred *cred = current_cred();
int retval = 1;
- if (grp != current->egid)
- retval = groups_search(current->group_info, grp);
+
+ if (grp != cred->egid)
+ retval = groups_search(cred->group_info, grp);
return retval;
}
@@ -1624,50 +1700,56 @@ asmlinkage long sys_umask(int mask)
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
- long error = 0;
+ struct task_struct *me = current;
+ unsigned char comm[sizeof(me->comm)];
+ long error;
- if (security_task_prctl(option, arg2, arg3, arg4, arg5, &error))
+ error = security_task_prctl(option, arg2, arg3, arg4, arg5);
+ if (error != -ENOSYS)
return error;
+ error = 0;
switch (option) {
case PR_SET_PDEATHSIG:
if (!valid_signal(arg2)) {
error = -EINVAL;
break;
}
- current->pdeath_signal = arg2;
+ me->pdeath_signal = arg2;
+ error = 0;
break;
case PR_GET_PDEATHSIG:
- error = put_user(current->pdeath_signal, (int __user *)arg2);
+ error = put_user(me->pdeath_signal, (int __user *)arg2);
break;
case PR_GET_DUMPABLE:
- error = get_dumpable(current->mm);
+ error = get_dumpable(me->mm);
break;
case PR_SET_DUMPABLE:
if (arg2 < 0 || arg2 > 1) {
error = -EINVAL;
break;
}
- set_dumpable(current->mm, arg2);
+ set_dumpable(me->mm, arg2);
+ error = 0;
break;
case PR_SET_UNALIGN:
- error = SET_UNALIGN_CTL(current, arg2);
+ error = SET_UNALIGN_CTL(me, arg2);
break;
case PR_GET_UNALIGN:
- error = GET_UNALIGN_CTL(current, arg2);
+ error = GET_UNALIGN_CTL(me, arg2);
break;
case PR_SET_FPEMU:
- error = SET_FPEMU_CTL(current, arg2);
+ error = SET_FPEMU_CTL(me, arg2);
break;
case PR_GET_FPEMU:
- error = GET_FPEMU_CTL(current, arg2);
+ error = GET_FPEMU_CTL(me, arg2);
break;
case PR_SET_FPEXC:
- error = SET_FPEXC_CTL(current, arg2);
+ error = SET_FPEXC_CTL(me, arg2);
break;
case PR_GET_FPEXC:
- error = GET_FPEXC_CTL(current, arg2);
+ error = GET_FPEXC_CTL(me, arg2);
break;
case PR_GET_TIMING:
error = PR_TIMING_STATISTICAL;
@@ -1675,33 +1757,28 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
case PR_SET_TIMING:
if (arg2 != PR_TIMING_STATISTICAL)
error = -EINVAL;
+ else
+ error = 0;
break;
- case PR_SET_NAME: {
- struct task_struct *me = current;
- unsigned char ncomm[sizeof(me->comm)];
-
- ncomm[sizeof(me->comm)-1] = 0;
- if (strncpy_from_user(ncomm, (char __user *)arg2,
- sizeof(me->comm)-1) < 0)
+ case PR_SET_NAME:
+ comm[sizeof(me->comm)-1] = 0;
+ if (strncpy_from_user(comm, (char __user *)arg2,
+ sizeof(me->comm) - 1) < 0)
return -EFAULT;
- set_task_comm(me, ncomm);
+ set_task_comm(me, comm);
return 0;
- }
- case PR_GET_NAME: {
- struct task_struct *me = current;
- unsigned char tcomm[sizeof(me->comm)];
-
- get_task_comm(tcomm, me);
- if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))
+ case PR_GET_NAME:
+ get_task_comm(comm, me);
+ if (copy_to_user((char __user *)arg2, comm,
+ sizeof(comm)))
return -EFAULT;
return 0;
- }
case PR_GET_ENDIAN:
- error = GET_ENDIAN(current, arg2);
+ error = GET_ENDIAN(me, arg2);
break;
case PR_SET_ENDIAN:
- error = SET_ENDIAN(current, arg2);
+ error = SET_ENDIAN(me, arg2);
break;
case PR_GET_SECCOMP:
@@ -1725,6 +1802,7 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
current->default_timer_slack_ns;
else
current->timer_slack_ns = arg2;
+ error = 0;
break;
default:
error = -EINVAL;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c83f566e940a..ff6d45c7626f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -121,6 +121,10 @@ extern int sg_big_buff;
#include <asm/system.h>
#endif
+#ifdef CONFIG_SPARC64
+extern int sysctl_tsb_ratio;
+#endif
+
#ifdef __hppa__
extern int pwrsw_enabled;
extern int unaligned_enabled;
@@ -451,6 +455,16 @@ static struct ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
#endif
+#ifdef CONFIG_SPARC64
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "tsb-ratio",
+ .data = &sysctl_tsb_ratio,
+ .maxlen = sizeof (int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+#endif
#ifdef __hppa__
{
.ctl_name = KERN_HPPA_PWRSW,
@@ -487,6 +501,16 @@ static struct ctl_table kern_table[] = {
.proc_handler = &ftrace_enable_sysctl,
},
#endif
+#ifdef CONFIG_STACK_TRACER
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "stack_tracer_enabled",
+ .data = &stack_tracer_enabled,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &stack_trace_sysctl,
+ },
+#endif
#ifdef CONFIG_TRACING
{
.ctl_name = CTL_UNNUMBERED,
@@ -1661,7 +1685,7 @@ out:
static int test_perm(int mode, int op)
{
- if (!current->euid)
+ if (!current_euid())
mode >>= 6;
else if (in_egroup_p(0))
mode >>= 3;
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index c35da23ab8fb..fafeb48f27c0 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -730,7 +730,6 @@ static const struct trans_ctl_table trans_fs_quota_table[] = {
};
static const struct trans_ctl_table trans_fs_xfs_table[] = {
- { XFS_RESTRICT_CHOWN, "restrict_chown" },
{ XFS_SGID_INHERIT, "irix_sgid_inherit" },
{ XFS_SYMLINK_MODE, "irix_symlink_mode" },
{ XFS_PANIC_MASK, "panic_mask" },
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 6d7dc4ec4aa5..888adbcca30c 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -290,18 +290,17 @@ ret:
return;
}
-static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
+static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
struct listener_list *listeners;
struct listener *s, *tmp;
unsigned int cpu;
- cpumask_t mask = *maskp;
- if (!cpus_subset(mask, cpu_possible_map))
+ if (!cpumask_subset(mask, cpu_possible_mask))
return -EINVAL;
if (isadd == REGISTER) {
- for_each_cpu_mask_nr(cpu, mask) {
+ for_each_cpu(cpu, mask) {
s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
cpu_to_node(cpu));
if (!s)
@@ -320,7 +319,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
/* Deregister or cleanup */
cleanup:
- for_each_cpu_mask_nr(cpu, mask) {
+ for_each_cpu(cpu, mask) {
listeners = &per_cpu(listener_array, cpu);
down_write(&listeners->sem);
list_for_each_entry_safe(s, tmp, &listeners->list, list) {
@@ -335,7 +334,7 @@ cleanup:
return 0;
}
-static int parse(struct nlattr *na, cpumask_t *mask)
+static int parse(struct nlattr *na, struct cpumask *mask)
{
char *data;
int len;
@@ -428,23 +427,33 @@ err:
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
- int rc = 0;
+ int rc;
struct sk_buff *rep_skb;
struct taskstats *stats;
size_t size;
- cpumask_t mask;
+ cpumask_var_t mask;
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
- rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask);
+ rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
if (rc < 0)
- return rc;
- if (rc == 0)
- return add_del_listener(info->snd_pid, &mask, REGISTER);
+ goto free_return_rc;
+ if (rc == 0) {
+ rc = add_del_listener(info->snd_pid, mask, REGISTER);
+ goto free_return_rc;
+ }
- rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], &mask);
+ rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
if (rc < 0)
+ goto free_return_rc;
+ if (rc == 0) {
+ rc = add_del_listener(info->snd_pid, mask, DEREGISTER);
+free_return_rc:
+ free_cpumask_var(mask);
return rc;
- if (rc == 0)
- return add_del_listener(info->snd_pid, &mask, DEREGISTER);
+ }
+ free_cpumask_var(mask);
/*
* Size includes space for nested attributes
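
The taskstats conversion above shows the idiom that replaces on-stack cpumask_t variables: allocate a cpumask_var_t (a real allocation only with CONFIG_CPUMASK_OFFSTACK), operate on it through the cpumask_* helpers, and free it on every exit path. A minimal sketch, using the hypothetical name example_count_online:

#include <linux/cpumask.h>
#include <linux/gfp.h>

static int example_count_online(void)
{
	cpumask_var_t mask;
	int cpu, n = 0;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(mask, cpu_online_mask);	/* work on a private copy */
	for_each_cpu(cpu, mask)			/* iterates set bits below nr_cpu_ids */
		n++;

	free_cpumask_var(mask);			/* free on every exit path */
	return n;
}
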
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 9ed2eec97526..ca89e1593f08 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -145,10 +145,11 @@ static void clocksource_watchdog(unsigned long data)
* Cycle through CPUs to check if the CPUs stay
* synchronized to each other.
*/
- int next_cpu = next_cpu_nr(raw_smp_processor_id(), cpu_online_map);
+ int next_cpu = cpumask_next(raw_smp_processor_id(),
+ cpu_online_mask);
if (next_cpu >= nr_cpu_ids)
- next_cpu = first_cpu(cpu_online_map);
+ next_cpu = cpumask_first(cpu_online_mask);
watchdog_timer.expires += WATCHDOG_INTERVAL;
add_timer_on(&watchdog_timer, next_cpu);
}
@@ -173,7 +174,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
watchdog_last = watchdog->read();
watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
add_timer_on(&watchdog_timer,
- first_cpu(cpu_online_map));
+ cpumask_first(cpu_online_mask));
}
} else {
if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
@@ -195,7 +196,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
watchdog_timer.expires =
jiffies + WATCHDOG_INTERVAL;
add_timer_on(&watchdog_timer,
- first_cpu(cpu_online_map));
+ cpumask_first(cpu_online_mask));
}
}
}
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 8ff15e5d486b..f5f793d92415 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -131,7 +131,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
{
enum hrtimer_restart res = HRTIMER_NORESTART;
- write_seqlock_irq(&xtime_lock);
+ write_seqlock(&xtime_lock);
switch (time_state) {
case TIME_OK:
@@ -164,7 +164,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
}
update_vsyscall(&xtime, clock);
- write_sequnlock_irq(&xtime_lock);
+ write_sequnlock(&xtime_lock);
return res;
}
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 9590af2327be..118a3b3b3f9a 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -28,7 +28,9 @@
*/
struct tick_device tick_broadcast_device;
-static cpumask_t tick_broadcast_mask;
+/* FIXME: Use cpumask_var_t. */
+static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
+static DECLARE_BITMAP(tmpmask, NR_CPUS);
static DEFINE_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;
@@ -46,9 +48,9 @@ struct tick_device *tick_get_broadcast_device(void)
return &tick_broadcast_device;
}
-cpumask_t *tick_get_broadcast_mask(void)
+struct cpumask *tick_get_broadcast_mask(void)
{
- return &tick_broadcast_mask;
+ return to_cpumask(tick_broadcast_mask);
}
/*
@@ -72,7 +74,7 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
clockevents_exchange_device(NULL, dev);
tick_broadcast_device.evtdev = dev;
- if (!cpus_empty(tick_broadcast_mask))
+ if (!cpumask_empty(tick_get_broadcast_mask()))
tick_broadcast_start_periodic(dev);
return 1;
}
@@ -104,7 +106,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
*/
if (!tick_device_is_functional(dev)) {
dev->event_handler = tick_handle_periodic;
- cpu_set(cpu, tick_broadcast_mask);
+ cpumask_set_cpu(cpu, tick_get_broadcast_mask());
tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
ret = 1;
} else {
@@ -116,7 +118,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
int cpu = smp_processor_id();
- cpu_clear(cpu, tick_broadcast_mask);
+ cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
tick_broadcast_clear_oneshot(cpu);
}
}
@@ -125,9 +127,9 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
}
/*
- * Broadcast the event to the cpus, which are set in the mask
+ * Broadcast the event to the cpus, which are set in the mask (mangled).
*/
-static void tick_do_broadcast(cpumask_t mask)
+static void tick_do_broadcast(struct cpumask *mask)
{
int cpu = smp_processor_id();
struct tick_device *td;
@@ -135,22 +137,21 @@ static void tick_do_broadcast(cpumask_t mask)
/*
* Check, if the current cpu is in the mask
*/
- if (cpu_isset(cpu, mask)) {
- cpu_clear(cpu, mask);
+ if (cpumask_test_cpu(cpu, mask)) {
+ cpumask_clear_cpu(cpu, mask);
td = &per_cpu(tick_cpu_device, cpu);
td->evtdev->event_handler(td->evtdev);
}
- if (!cpus_empty(mask)) {
+ if (!cpumask_empty(mask)) {
/*
* It might be necessary to actually check whether the devices
* have different broadcast functions. For now, just use the
* one of the first device. This works as long as we have this
* misfeature only on x86 (lapic)
*/
- cpu = first_cpu(mask);
- td = &per_cpu(tick_cpu_device, cpu);
- td->evtdev->broadcast(&mask);
+ td = &per_cpu(tick_cpu_device, cpumask_first(mask));
+ td->evtdev->broadcast(mask);
}
}
@@ -160,12 +161,11 @@ static void tick_do_broadcast(cpumask_t mask)
*/
static void tick_do_periodic_broadcast(void)
{
- cpumask_t mask;
-
spin_lock(&tick_broadcast_lock);
- cpus_and(mask, cpu_online_map, tick_broadcast_mask);
- tick_do_broadcast(mask);
+ cpumask_and(to_cpumask(tmpmask),
+ cpu_online_mask, tick_get_broadcast_mask());
+ tick_do_broadcast(to_cpumask(tmpmask));
spin_unlock(&tick_broadcast_lock);
}
@@ -228,13 +228,13 @@ static void tick_do_broadcast_on_off(void *why)
if (!tick_device_is_functional(dev))
goto out;
- bc_stopped = cpus_empty(tick_broadcast_mask);
+ bc_stopped = cpumask_empty(tick_get_broadcast_mask());
switch (*reason) {
case CLOCK_EVT_NOTIFY_BROADCAST_ON:
case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
- if (!cpu_isset(cpu, tick_broadcast_mask)) {
- cpu_set(cpu, tick_broadcast_mask);
+ if (!cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
+ cpumask_set_cpu(cpu, tick_get_broadcast_mask());
if (tick_broadcast_device.mode ==
TICKDEV_MODE_PERIODIC)
clockevents_shutdown(dev);
@@ -244,8 +244,8 @@ static void tick_do_broadcast_on_off(void *why)
break;
case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
if (!tick_broadcast_force &&
- cpu_isset(cpu, tick_broadcast_mask)) {
- cpu_clear(cpu, tick_broadcast_mask);
+ cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
+ cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
if (tick_broadcast_device.mode ==
TICKDEV_MODE_PERIODIC)
tick_setup_periodic(dev, 0);
@@ -253,7 +253,7 @@ static void tick_do_broadcast_on_off(void *why)
break;
}
- if (cpus_empty(tick_broadcast_mask)) {
+ if (cpumask_empty(tick_get_broadcast_mask())) {
if (!bc_stopped)
clockevents_shutdown(bc);
} else if (bc_stopped) {
@@ -272,7 +272,7 @@ out:
*/
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
- if (!cpu_isset(*oncpu, cpu_online_map))
+ if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
"offline CPU #%d\n", *oncpu);
else
@@ -303,10 +303,10 @@ void tick_shutdown_broadcast(unsigned int *cpup)
spin_lock_irqsave(&tick_broadcast_lock, flags);
bc = tick_broadcast_device.evtdev;
- cpu_clear(cpu, tick_broadcast_mask);
+ cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
- if (bc && cpus_empty(tick_broadcast_mask))
+ if (bc && cpumask_empty(tick_get_broadcast_mask()))
clockevents_shutdown(bc);
}
@@ -342,10 +342,10 @@ int tick_resume_broadcast(void)
switch (tick_broadcast_device.mode) {
case TICKDEV_MODE_PERIODIC:
- if(!cpus_empty(tick_broadcast_mask))
+ if (!cpumask_empty(tick_get_broadcast_mask()))
tick_broadcast_start_periodic(bc);
- broadcast = cpu_isset(smp_processor_id(),
- tick_broadcast_mask);
+ broadcast = cpumask_test_cpu(smp_processor_id(),
+ tick_get_broadcast_mask());
break;
case TICKDEV_MODE_ONESHOT:
broadcast = tick_resume_broadcast_oneshot(bc);
@@ -360,14 +360,15 @@ int tick_resume_broadcast(void)
#ifdef CONFIG_TICK_ONESHOT
-static cpumask_t tick_broadcast_oneshot_mask;
+/* FIXME: use cpumask_var_t. */
+static DECLARE_BITMAP(tick_broadcast_oneshot_mask, NR_CPUS);
/*
- * Debugging: see timer_list.c
+ * Exposed for debugging: see timer_list.c
*/
-cpumask_t *tick_get_broadcast_oneshot_mask(void)
+struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
- return &tick_broadcast_oneshot_mask;
+ return to_cpumask(tick_broadcast_oneshot_mask);
}
static int tick_broadcast_set_event(ktime_t expires, int force)
@@ -389,7 +390,7 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
*/
void tick_check_oneshot_broadcast(int cpu)
{
- if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
+ if (cpumask_test_cpu(cpu, to_cpumask(tick_broadcast_oneshot_mask))) {
struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
@@ -402,7 +403,6 @@ void tick_check_oneshot_broadcast(int cpu)
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
struct tick_device *td;
- cpumask_t mask;
ktime_t now, next_event;
int cpu;
@@ -410,13 +410,13 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
again:
dev->next_event.tv64 = KTIME_MAX;
next_event.tv64 = KTIME_MAX;
- mask = CPU_MASK_NONE;
+ cpumask_clear(to_cpumask(tmpmask));
now = ktime_get();
/* Find all expired events */
- for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) {
+ for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) {
td = &per_cpu(tick_cpu_device, cpu);
if (td->evtdev->next_event.tv64 <= now.tv64)
- cpu_set(cpu, mask);
+ cpumask_set_cpu(cpu, to_cpumask(tmpmask));
else if (td->evtdev->next_event.tv64 < next_event.tv64)
next_event.tv64 = td->evtdev->next_event.tv64;
}
@@ -424,7 +424,7 @@ again:
/*
* Wakeup the cpus which have an expired event.
*/
- tick_do_broadcast(mask);
+ tick_do_broadcast(to_cpumask(tmpmask));
/*
* Two reasons for reprogram:
@@ -476,15 +476,16 @@ void tick_broadcast_oneshot_control(unsigned long reason)
goto out;
if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
- if (!cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
- cpu_set(cpu, tick_broadcast_oneshot_mask);
+ if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
+ cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask());
clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
if (dev->next_event.tv64 < bc->next_event.tv64)
tick_broadcast_set_event(dev->next_event, 1);
}
} else {
- if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
- cpu_clear(cpu, tick_broadcast_oneshot_mask);
+ if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
+ cpumask_clear_cpu(cpu,
+ tick_get_broadcast_oneshot_mask());
clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
if (dev->next_event.tv64 != KTIME_MAX)
tick_program_event(dev->next_event, 1);
@@ -502,15 +503,16 @@ out:
*/
static void tick_broadcast_clear_oneshot(int cpu)
{
- cpu_clear(cpu, tick_broadcast_oneshot_mask);
+ cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
}
-static void tick_broadcast_init_next_event(cpumask_t *mask, ktime_t expires)
+static void tick_broadcast_init_next_event(struct cpumask *mask,
+ ktime_t expires)
{
struct tick_device *td;
int cpu;
- for_each_cpu_mask_nr(cpu, *mask) {
+ for_each_cpu(cpu, mask) {
td = &per_cpu(tick_cpu_device, cpu);
if (td->evtdev)
td->evtdev->next_event = expires;
@@ -526,7 +528,6 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
if (bc->event_handler != tick_handle_oneshot_broadcast) {
int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
int cpu = smp_processor_id();
- cpumask_t mask;
bc->event_handler = tick_handle_oneshot_broadcast;
clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
@@ -540,13 +541,15 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
* oneshot_mask bits for those and program the
* broadcast device to fire.
*/
- mask = tick_broadcast_mask;
- cpu_clear(cpu, mask);
- cpus_or(tick_broadcast_oneshot_mask,
- tick_broadcast_oneshot_mask, mask);
-
- if (was_periodic && !cpus_empty(mask)) {
- tick_broadcast_init_next_event(&mask, tick_next_period);
+ cpumask_copy(to_cpumask(tmpmask), tick_get_broadcast_mask());
+ cpumask_clear_cpu(cpu, to_cpumask(tmpmask));
+ cpumask_or(tick_get_broadcast_oneshot_mask(),
+ tick_get_broadcast_oneshot_mask(),
+ to_cpumask(tmpmask));
+
+ if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
+ tick_broadcast_init_next_event(to_cpumask(tmpmask),
+ tick_next_period);
tick_broadcast_set_event(tick_next_period, 1);
} else
bc->next_event.tv64 = KTIME_MAX;
@@ -585,7 +588,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
* Clear the broadcast mask flag for the dead cpu, but do not
* stop the broadcast device!
*/
- cpu_clear(cpu, tick_broadcast_oneshot_mask);
+ cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
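
Where a mask has static lifetime and is awkward to allocate (as the FIXME comments above note for tick_broadcast_mask), the interim idiom is a DECLARE_BITMAP() sized by NR_CPUS plus to_cpumask() to hand out a struct cpumask pointer. A hedged sketch with hypothetical names:

#include <linux/cpumask.h>

/* interim static storage, as in tick-broadcast.c above */
static DECLARE_BITMAP(example_bits, NR_CPUS);

static struct cpumask *example_get_mask(void)
{
	return to_cpumask(example_bits);	/* view the bitmap as a cpumask */
}

static void example_mark_cpu(int cpu)
{
	/* callers then use the ordinary cpumask API on the returned pointer */
	cpumask_set_cpu(cpu, example_get_mask());
}
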
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index f8372be74122..63e05d423a09 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -254,7 +254,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
curdev = NULL;
}
clockevents_exchange_device(curdev, newdev);
- tick_setup_device(td, newdev, cpu, &cpumask_of_cpu(cpu));
+ tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
tick_oneshot_notify();
@@ -299,9 +299,9 @@ static void tick_shutdown(unsigned int *cpup)
}
/* Transfer the do_timer job away from this cpu */
if (*cpup == tick_do_timer_cpu) {
- int cpu = first_cpu(cpu_online_map);
+ int cpu = cpumask_first(cpu_online_mask);
- tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu :
+ tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
TICK_DO_TIMER_NONE;
}
spin_unlock_irqrestore(&tick_device_lock, flags);
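
The tick_shutdown() hunk also reflects the new "no CPU found" convention: cpumask_first()/cpumask_next() return a value >= nr_cpu_ids when the mask is exhausted, rather than NR_CPUS. A small sketch of checking that result (example_pick_cpu is a hypothetical helper):

#include <linux/cpumask.h>

static int example_pick_cpu(const struct cpumask *mask)
{
	int cpu = cpumask_first(mask);

	return cpu < nr_cpu_ids ? cpu : -1;	/* -1: the mask was empty */
}
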
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 70f872c71f4e..76a574bbef97 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -247,7 +247,7 @@ void tick_nohz_stop_sched_tick(int inidle)
if (need_resched())
goto end;
- if (unlikely(local_softirq_pending())) {
+ if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
static int ratelimit;
if (ratelimit < 10) {
@@ -282,8 +282,31 @@ void tick_nohz_stop_sched_tick(int inidle)
/* Schedule the tick, if we are at least one jiffie off */
if ((long)delta_jiffies >= 1) {
+ /*
+ * calculate the expiry time for the next timer wheel
+ * timer
+ */
+ expires = ktime_add_ns(last_update, tick_period.tv64 *
+ delta_jiffies);
+
+ /*
+ * If this cpu is the one which updates jiffies, then
+ * give up the assignment and let it be taken by the
+ * cpu which runs the tick timer next, which might be
+ * this cpu as well. If we don't drop this here the
+ * jiffies might be stale and do_timer() never
+ * invoked.
+ */
+ if (cpu == tick_do_timer_cpu)
+ tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+
if (delta_jiffies > 1)
cpumask_set_cpu(cpu, nohz_cpu_mask);
+
+ /* Skip reprogram of event if it's not changed */
+ if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
+ goto out;
+
/*
* nohz_stop_sched_tick can be called several times before
* the nohz_restart_sched_tick is called. This happens when
@@ -306,17 +329,6 @@ void tick_nohz_stop_sched_tick(int inidle)
rcu_enter_nohz();
}
- /*
- * If this cpu is the one which updates jiffies, then
- * give up the assignment and let it be taken by the
- * cpu which runs the tick timer next, which might be
- * this cpu as well. If we don't drop this here the
- * jiffies might be stale and do_timer() never
- * invoked.
- */
- if (cpu == tick_do_timer_cpu)
- tick_do_timer_cpu = TICK_DO_TIMER_NONE;
-
ts->idle_sleeps++;
/*
@@ -332,12 +344,7 @@ void tick_nohz_stop_sched_tick(int inidle)
goto out;
}
- /*
- * calculate the expiry time for the next timer wheel
- * timer
- */
- expires = ktime_add_ns(last_update, tick_period.tv64 *
- delta_jiffies);
+ /* Mark expiries */
ts->idle_expires = expires;
if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
@@ -681,7 +688,6 @@ void tick_setup_sched_timer(void)
*/
hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
ts->sched_timer.function = tick_sched_timer;
- ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
/* Get the next period (per cpu) */
hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
diff --git a/kernel/timer.c b/kernel/timer.c
index dbd50fabe4c7..566257d1dc10 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1192,25 +1192,25 @@ asmlinkage long sys_getppid(void)
asmlinkage long sys_getuid(void)
{
/* Only we change this so SMP safe */
- return current->uid;
+ return current_uid();
}
asmlinkage long sys_geteuid(void)
{
/* Only we change this so SMP safe */
- return current->euid;
+ return current_euid();
}
asmlinkage long sys_getgid(void)
{
/* Only we change this so SMP safe */
- return current->gid;
+ return current_gid();
}
asmlinkage long sys_getegid(void)
{
/* Only we change this so SMP safe */
- return current->egid;
+ return current_egid();
}
#endif
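
These getters now go through the credential accessors instead of reading task_struct fields directly. Under the new scheme a task's credentials live in a shared, refcounted struct cred, so code that needs several fields takes one snapshot with current_cred(); current_uid() and friends are shorthands for single fields. A minimal sketch (example_is_root is hypothetical):

#include <linux/cred.h>

static int example_is_root(void)
{
	const struct cred *cred = current_cred();	/* current's own creds */

	return cred->euid == 0;		/* equivalent to current_euid() == 0 here */
}
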
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index bde6f03512d5..e2a4ff6fc3a6 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -244,16 +244,21 @@ config STACK_TRACER
This tracer works by hooking into every function call that the
kernel executes, and keeping a maximum stack depth value and
- stack-trace saved. Because this logic has to execute in every
- kernel function, all the time, this option can slow down the
- kernel measurably and is generally intended for kernel
- developers only.
+ stack-trace saved. If this is configured with DYNAMIC_FTRACE
+ then it will not have any overhead while the stack tracer
+ is disabled.
+
+ To enable the stack tracer on bootup, pass in 'stacktrace'
+ on the kernel command line.
+
+ The stack tracer can also be enabled or disabled via the
+ sysctl kernel.stack_tracer_enabled
Say N if unsure.
-config BTS_TRACER
+config HW_BRANCH_TRACER
depends on HAVE_HW_BRANCH_TRACER
- bool "Trace branches"
+ bool "Trace hw branches"
select TRACING
help
This tracer records all branches on the system in a circular
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 62dc561b6676..349d5a93653f 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -31,7 +31,7 @@ obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
-obj-$(CONFIG_BTS_TRACER) += trace_bts.o
+obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o
obj-$(CONFIG_POWER_TRACER) += trace_power.o
libftrace-y := ftrace.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a12f80efceaa..2f32969c09df 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1047,6 +1047,13 @@ ftrace_match(unsigned char *buff, int len, int enable)
int type = MATCH_FULL;
unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
unsigned i, match = 0, search_len = 0;
+ int not = 0;
+
+ if (buff[0] == '!') {
+ not = 1;
+ buff++;
+ len--;
+ }
for (i = 0; i < len; i++) {
if (buff[i] == '*') {
@@ -1100,8 +1107,12 @@ ftrace_match(unsigned char *buff, int len, int enable)
matched = 1;
break;
}
- if (matched)
- rec->flags |= flag;
+ if (matched) {
+ if (not)
+ rec->flags &= ~flag;
+ else
+ rec->flags |= flag;
+ }
}
pg = pg->next;
}
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 7f69cfeaadf7..a9d9760dc7b6 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -69,6 +69,7 @@ void tracing_on(void)
{
set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
+EXPORT_SYMBOL_GPL(tracing_on);
/**
* tracing_off - turn off all tracing buffers
@@ -82,6 +83,7 @@ void tracing_off(void)
{
clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
+EXPORT_SYMBOL_GPL(tracing_off);
/**
* tracing_off_permanent - permanently disable ring buffers
@@ -107,16 +109,18 @@ u64 ring_buffer_time_stamp(int cpu)
preempt_disable_notrace();
/* shift to debug/test normalization and TIME_EXTENTS */
time = sched_clock() << DEBUG_SHIFT;
- preempt_enable_notrace();
+ preempt_enable_no_resched_notrace();
return time;
}
+EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
{
/* Just stupid testing the normalize function and deltas */
*ts >>= DEBUG_SHIFT;
}
+EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
#define RB_ALIGNMENT_SHIFT 2
@@ -166,6 +170,7 @@ unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
return rb_event_length(event);
}
+EXPORT_SYMBOL_GPL(ring_buffer_event_length);
/* inline for ring buffer fast paths */
static inline void *
@@ -187,9 +192,10 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
{
return rb_event_data(event);
}
+EXPORT_SYMBOL_GPL(ring_buffer_event_data);
#define for_each_buffer_cpu(buffer, cpu) \
- for_each_cpu_mask(cpu, buffer->cpumask)
+ for_each_cpu(cpu, buffer->cpumask)
#define TS_SHIFT 27
#define TS_MASK ((1ULL << TS_SHIFT) - 1)
@@ -258,11 +264,10 @@ struct ring_buffer_per_cpu {
};
struct ring_buffer {
- unsigned long size;
unsigned pages;
unsigned flags;
int cpus;
- cpumask_t cpumask;
+ cpumask_var_t cpumask;
atomic_t record_disabled;
struct mutex mutex;
@@ -428,7 +433,7 @@ extern int ring_buffer_page_too_big(void);
/**
* ring_buffer_alloc - allocate a new ring_buffer
- * @size: the size in bytes that is needed.
+ * @size: the size in bytes per cpu that is needed.
* @flags: attributes to set for the ring buffer.
*
* Currently the only flag that is available is the RB_FL_OVERWRITE
@@ -453,6 +458,9 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
if (!buffer)
return NULL;
+ if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
+ goto fail_free_buffer;
+
buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
buffer->flags = flags;
@@ -460,14 +468,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
if (buffer->pages == 1)
buffer->pages++;
- buffer->cpumask = cpu_possible_map;
+ cpumask_copy(buffer->cpumask, cpu_possible_mask);
buffer->cpus = nr_cpu_ids;
bsize = sizeof(void *) * nr_cpu_ids;
buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
GFP_KERNEL);
if (!buffer->buffers)
- goto fail_free_buffer;
+ goto fail_free_cpumask;
for_each_buffer_cpu(buffer, cpu) {
buffer->buffers[cpu] =
@@ -487,10 +495,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
}
kfree(buffer->buffers);
+ fail_free_cpumask:
+ free_cpumask_var(buffer->cpumask);
+
fail_free_buffer:
kfree(buffer);
return NULL;
}
+EXPORT_SYMBOL_GPL(ring_buffer_alloc);
/**
* ring_buffer_free - free a ring buffer.
@@ -504,8 +516,11 @@ ring_buffer_free(struct ring_buffer *buffer)
for_each_buffer_cpu(buffer, cpu)
rb_free_cpu_buffer(buffer->buffers[cpu]);
+ free_cpumask_var(buffer->cpumask);
+
kfree(buffer);
}
+EXPORT_SYMBOL_GPL(ring_buffer_free);
static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
@@ -681,6 +696,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
mutex_unlock(&buffer->mutex);
return -ENOMEM;
}
+EXPORT_SYMBOL_GPL(ring_buffer_resize);
static inline int rb_null_event(struct ring_buffer_event *event)
{
@@ -839,6 +855,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
* back to us). This allows us to do a simple loop to
* assign the commit to the tail.
*/
+ again:
while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
cpu_buffer->commit_page->page->commit =
cpu_buffer->commit_page->write;
@@ -854,6 +871,17 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->commit_page->write;
barrier();
}
+
+ /* again, keep gcc from optimizing */
+ barrier();
+
+ /*
+ * If an interrupt came in just after the first while loop
+ * and pushed the tail page forward, we will be left with
+ * a dangling commit that will never go forward.
+ */
+ if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
+ goto again;
}
static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
@@ -951,12 +979,15 @@ static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
unsigned type, unsigned long length, u64 *ts)
{
- struct buffer_page *tail_page, *head_page, *reader_page;
+ struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
unsigned long tail, write;
struct ring_buffer *buffer = cpu_buffer->buffer;
struct ring_buffer_event *event;
unsigned long flags;
+ commit_page = cpu_buffer->commit_page;
+ /* we just need to protect against interrupts */
+ barrier();
tail_page = cpu_buffer->tail_page;
write = local_add_return(length, &tail_page->write);
tail = write - length;
@@ -982,7 +1013,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
* it all the way around the buffer, bail, and warn
* about it.
*/
- if (unlikely(next_page == cpu_buffer->commit_page)) {
+ if (unlikely(next_page == commit_page)) {
WARN_ON_ONCE(1);
goto out_unlock;
}
@@ -1260,7 +1291,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
cpu = raw_smp_processor_id();
- if (!cpu_isset(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
goto out;
cpu_buffer = buffer->buffers[cpu];
@@ -1290,6 +1321,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
ftrace_preempt_enable(resched);
return NULL;
}
+EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event)
@@ -1336,6 +1368,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
return 0;
}
+EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
/**
* ring_buffer_write - write data to the buffer without reserving
@@ -1371,7 +1404,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
cpu = raw_smp_processor_id();
- if (!cpu_isset(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
goto out;
cpu_buffer = buffer->buffers[cpu];
@@ -1397,6 +1430,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
return ret;
}
+EXPORT_SYMBOL_GPL(ring_buffer_write);
static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
@@ -1423,6 +1457,7 @@ void ring_buffer_record_disable(struct ring_buffer *buffer)
{
atomic_inc(&buffer->record_disabled);
}
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
/**
* ring_buffer_record_enable - enable writes to the buffer
@@ -1435,6 +1470,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
{
atomic_dec(&buffer->record_disabled);
}
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
/**
* ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
@@ -1450,12 +1486,13 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
- if (!cpu_isset(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
return;
cpu_buffer = buffer->buffers[cpu];
atomic_inc(&cpu_buffer->record_disabled);
}
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
/**
* ring_buffer_record_enable_cpu - enable writes to the buffer
@@ -1469,12 +1506,13 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
- if (!cpu_isset(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
return;
cpu_buffer = buffer->buffers[cpu];
atomic_dec(&cpu_buffer->record_disabled);
}
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
/**
* ring_buffer_entries_cpu - get the number of entries in a cpu buffer
@@ -1485,12 +1523,13 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
- if (!cpu_isset(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
cpu_buffer = buffer->buffers[cpu];
return cpu_buffer->entries;
}
+EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
/**
* ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
@@ -1501,12 +1540,13 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
- if (!cpu_isset(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
cpu_buffer = buffer->buffers[cpu];
return cpu_buffer->overrun;
}
+EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
/**
* ring_buffer_entries - get the number of entries in a buffer
@@ -1529,6 +1569,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
return entries;
}
+EXPORT_SYMBOL_GPL(ring_buffer_entries);
/**
* ring_buffer_overrun_cpu - get the number of overruns in buffer
@@ -1551,6 +1592,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
return overruns;
}
+EXPORT_SYMBOL_GPL(ring_buffer_overruns);
static void rb_iter_reset(struct ring_buffer_iter *iter)
{
@@ -1586,6 +1628,7 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
rb_iter_reset(iter);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
+EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
/**
* ring_buffer_iter_empty - check if an iterator has no more to read
@@ -1600,6 +1643,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
return iter->head_page == cpu_buffer->commit_page &&
iter->head == rb_commit_index(cpu_buffer);
}
+EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
static void
rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
@@ -1814,7 +1858,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
struct buffer_page *reader;
int nr_loops = 0;
- if (!cpu_isset(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
return NULL;
cpu_buffer = buffer->buffers[cpu];
@@ -1866,6 +1910,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
return NULL;
}
+EXPORT_SYMBOL_GPL(ring_buffer_peek);
static struct ring_buffer_event *
rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
@@ -1926,6 +1971,7 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
return NULL;
}
+EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
/**
* ring_buffer_peek - peek at the next event to be read
@@ -1987,7 +2033,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
struct ring_buffer_event *event;
unsigned long flags;
- if (!cpu_isset(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
return NULL;
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2003,6 +2049,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
return event;
}
+EXPORT_SYMBOL_GPL(ring_buffer_consume);
/**
* ring_buffer_read_start - start a non consuming read of the buffer
@@ -2023,7 +2070,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
struct ring_buffer_iter *iter;
unsigned long flags;
- if (!cpu_isset(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
return NULL;
iter = kmalloc(sizeof(*iter), GFP_KERNEL);
@@ -2045,6 +2092,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
return iter;
}
+EXPORT_SYMBOL_GPL(ring_buffer_read_start);
/**
* ring_buffer_finish - finish reading the iterator of the buffer
@@ -2061,6 +2109,7 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
atomic_dec(&cpu_buffer->record_disabled);
kfree(iter);
}
+EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
/**
* ring_buffer_read - read the next item in the ring buffer by the iterator
@@ -2087,6 +2136,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
return event;
}
+EXPORT_SYMBOL_GPL(ring_buffer_read);
/**
* ring_buffer_size - return the size of the ring buffer (in bytes)
@@ -2096,6 +2146,7 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
return BUF_PAGE_SIZE * buffer->pages;
}
+EXPORT_SYMBOL_GPL(ring_buffer_size);
static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
@@ -2129,7 +2180,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
unsigned long flags;
- if (!cpu_isset(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
return;
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2142,6 +2193,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
+EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
/**
* ring_buffer_reset - reset a ring buffer
@@ -2154,6 +2206,7 @@ void ring_buffer_reset(struct ring_buffer *buffer)
for_each_buffer_cpu(buffer, cpu)
ring_buffer_reset_cpu(buffer, cpu);
}
+EXPORT_SYMBOL_GPL(ring_buffer_reset);
/**
* ring_buffer_empty - is the ring buffer empty?
@@ -2172,6 +2225,7 @@ int ring_buffer_empty(struct ring_buffer *buffer)
}
return 1;
}
+EXPORT_SYMBOL_GPL(ring_buffer_empty);
/**
* ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
@@ -2182,12 +2236,13 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
- if (!cpu_isset(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 1;
cpu_buffer = buffer->buffers[cpu];
return rb_per_cpu_empty(cpu_buffer);
}
+EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
/**
* ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
@@ -2205,13 +2260,12 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
struct ring_buffer_per_cpu *cpu_buffer_a;
struct ring_buffer_per_cpu *cpu_buffer_b;
- if (!cpu_isset(cpu, buffer_a->cpumask) ||
- !cpu_isset(cpu, buffer_b->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
+ !cpumask_test_cpu(cpu, buffer_b->cpumask))
return -EINVAL;
/* At least make sure the two buffers are somewhat the same */
- if (buffer_a->size != buffer_b->size ||
- buffer_a->pages != buffer_b->pages)
+ if (buffer_a->pages != buffer_b->pages)
return -EINVAL;
cpu_buffer_a = buffer_a->buffers[cpu];
@@ -2237,6 +2291,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
return 0;
}
+EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
struct buffer_data_page *bpage)
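
A side effect of the EXPORT_SYMBOL_GPL() additions above is that the ring buffer API becomes usable from modules. The sketch below only exercises entry points that appear in this diff; the buffer handle, size and error handling are illustrative assumptions, not part of the patch:

#include <linux/ring_buffer.h>

static int example_log(struct ring_buffer *buffer, const char *msg, int len)
{
	/* 0 on success; non-zero (e.g. -EBUSY) if the write was rejected */
	return ring_buffer_write(buffer, len, (void *)msg);
}

/* a caller would obtain the buffer once, for example with
 *	buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
 * where the size is now documented as bytes per cpu, and later release
 * it with ring_buffer_free(buffer).
 */
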
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 6adf660fc816..c580233add95 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -30,7 +30,6 @@
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
-#include <linux/seq_file.h>
#include <linux/writeback.h>
#include <linux/stacktrace.h>
@@ -90,10 +89,10 @@ static inline void ftrace_enable_cpu(void)
preempt_enable();
}
-static cpumask_t __read_mostly tracing_buffer_mask;
+static cpumask_var_t __read_mostly tracing_buffer_mask;
#define for_each_tracing_cpu(cpu) \
- for_each_cpu_mask(cpu, tracing_buffer_mask)
+ for_each_cpu(cpu, tracing_buffer_mask)
/*
* ftrace_dump_on_oops - variable to dump ftrace buffer on oops
@@ -287,6 +286,7 @@ static const char *trace_options[] = {
"annotate",
"userstacktrace",
"sym-userobj",
+ "printk-msg-only",
NULL
};
@@ -320,7 +320,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
data->pid = tsk->pid;
- data->uid = tsk->uid;
+ data->uid = task_uid(tsk);
data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
data->policy = tsk->policy;
data->rt_priority = tsk->rt_priority;
@@ -678,6 +678,16 @@ void tracing_reset(struct trace_array *tr, int cpu)
ftrace_enable_cpu();
}
+void tracing_reset_online_cpus(struct trace_array *tr)
+{
+ int cpu;
+
+ tr->time_start = ftrace_now(tr->cpu);
+
+ for_each_online_cpu(cpu)
+ tracing_reset(tr, cpu);
+}
+
#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
@@ -1299,7 +1309,7 @@ enum trace_file_type {
TRACE_FILE_ANNOTATE = 2,
};
-static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
+static void trace_iterator_increment(struct trace_iterator *iter)
{
/* Don't allow ftrace to trace into the ring buffers */
ftrace_disable_cpu();
@@ -1378,7 +1388,7 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
if (iter->ent)
- trace_iterator_increment(iter, iter->cpu);
+ trace_iterator_increment(iter);
return iter->ent ? iter : NULL;
}
@@ -1747,6 +1757,13 @@ lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
+static int task_state_char(unsigned long state)
+{
+ int bit = state ? __ffs(state) + 1 : 0;
+
+ return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
+}
+
/*
* The message is supposed to contain an ending newline.
* If the printing stops prematurely, try to add a newline of our own.
@@ -1794,10 +1811,10 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
return;
- if (cpu_isset(iter->cpu, iter->started))
+ if (cpumask_test_cpu(iter->cpu, iter->started))
return;
- cpu_set(iter->cpu, iter->started);
+ cpumask_set_cpu(iter->cpu, iter->started);
trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
}
@@ -1815,7 +1832,6 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
char *comm;
int S, T;
int i;
- unsigned state;
if (entry->type == TRACE_CONT)
return TRACE_TYPE_HANDLED;
@@ -1861,12 +1877,8 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
trace_assign_type(field, entry);
- T = field->next_state < sizeof(state_to_char) ?
- state_to_char[field->next_state] : 'X';
-
- state = field->prev_state ?
- __ffs(field->prev_state) + 1 : 0;
- S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
+ T = task_state_char(field->next_state);
+ S = task_state_char(field->prev_state);
comm = trace_find_cmdline(field->next_pid);
trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
field->prev_pid,
@@ -2007,10 +2019,8 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
trace_assign_type(field, entry);
- S = field->prev_state < sizeof(state_to_char) ?
- state_to_char[field->prev_state] : 'X';
- T = field->next_state < sizeof(state_to_char) ?
- state_to_char[field->next_state] : 'X';
+ T = task_state_char(field->next_state);
+ S = task_state_char(field->prev_state);
ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
field->prev_pid,
field->prev_prio,
@@ -2140,12 +2150,9 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
trace_assign_type(field, entry);
- S = field->prev_state < sizeof(state_to_char) ?
- state_to_char[field->prev_state] : 'X';
- T = field->next_state < sizeof(state_to_char) ?
- state_to_char[field->next_state] : 'X';
- if (entry->type == TRACE_WAKE)
- S = '+';
+ T = task_state_char(field->next_state);
+ S = entry->type == TRACE_WAKE ? '+' :
+ task_state_char(field->prev_state);
ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
field->prev_pid,
field->prev_prio,
@@ -2232,12 +2239,9 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
trace_assign_type(field, entry);
- S = field->prev_state < sizeof(state_to_char) ?
- state_to_char[field->prev_state] : 'X';
- T = field->next_state < sizeof(state_to_char) ?
- state_to_char[field->next_state] : 'X';
- if (entry->type == TRACE_WAKE)
- S = '+';
+ T = task_state_char(field->next_state);
+ S = entry->type == TRACE_WAKE ? '+' :
+ task_state_char(field->prev_state);
SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
SEQ_PUT_HEX_FIELD_RET(s, S);
@@ -2265,6 +2269,25 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
return TRACE_TYPE_HANDLED;
}
+static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
+{
+ struct trace_seq *s = &iter->seq;
+ struct trace_entry *entry = iter->ent;
+ struct print_entry *field;
+ int ret;
+
+ trace_assign_type(field, entry);
+
+ ret = trace_seq_printf(s, field->buf);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ if (entry->flags & TRACE_FLAG_CONT)
+ trace_seq_print_cont(s, iter);
+
+ return TRACE_TYPE_HANDLED;
+}
+
static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
@@ -2345,6 +2368,11 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
return ret;
}
+ if (iter->ent->type == TRACE_PRINT &&
+ trace_flags & TRACE_ITER_PRINTK &&
+ trace_flags & TRACE_ITER_PRINTK_MSGONLY)
+ return print_printk_msg_only(iter);
+
if (trace_flags & TRACE_ITER_BIN)
return print_bin_fmt(iter);
@@ -2425,7 +2453,7 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
/* Notify the tracer early; before we stop tracing. */
if (iter->trace && iter->trace->open)
- iter->trace->open(iter);
+ iter->trace->open(iter);
/* Annotate start of buffers if we had overruns */
if (ring_buffer_overruns(iter->tr->buffer))
@@ -2618,13 +2646,7 @@ static struct file_operations show_traces_fops = {
/*
* Only trace on a CPU if the bitmask is set:
*/
-static cpumask_t tracing_cpumask = CPU_MASK_ALL;
-
-/*
- * When tracing/tracing_cpu_mask is modified then this holds
- * the new bitmask we are about to install:
- */
-static cpumask_t tracing_cpumask_new;
+static cpumask_var_t tracing_cpumask;
/*
* The tracer itself will not take this lock, but still we want
@@ -2646,7 +2668,7 @@ tracing_cpumask_read(struct file *filp, char __user *ubuf,
mutex_lock(&tracing_cpumask_update_lock);
- len = cpumask_scnprintf(mask_str, count, &tracing_cpumask);
+ len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
if (count - len < 2) {
count = -EINVAL;
goto out_err;
@@ -2665,9 +2687,13 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
size_t count, loff_t *ppos)
{
int err, cpu;
+ cpumask_var_t tracing_cpumask_new;
+
+ if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
+ return -ENOMEM;
mutex_lock(&tracing_cpumask_update_lock);
- err = cpumask_parse_user(ubuf, count, &tracing_cpumask_new);
+ err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
if (err)
goto err_unlock;
@@ -2678,26 +2704,28 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
* Increase/decrease the disabled counter if we are
* about to flip a bit in the cpumask:
*/
- if (cpu_isset(cpu, tracing_cpumask) &&
- !cpu_isset(cpu, tracing_cpumask_new)) {
+ if (cpumask_test_cpu(cpu, tracing_cpumask) &&
+ !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
atomic_inc(&global_trace.data[cpu]->disabled);
}
- if (!cpu_isset(cpu, tracing_cpumask) &&
- cpu_isset(cpu, tracing_cpumask_new)) {
+ if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
+ cpumask_test_cpu(cpu, tracing_cpumask_new)) {
atomic_dec(&global_trace.data[cpu]->disabled);
}
}
__raw_spin_unlock(&ftrace_max_lock);
local_irq_enable();
- tracing_cpumask = tracing_cpumask_new;
+ cpumask_copy(tracing_cpumask, tracing_cpumask_new);
mutex_unlock(&tracing_cpumask_update_lock);
+ free_cpumask_var(tracing_cpumask_new);
return count;
err_unlock:
mutex_unlock(&tracing_cpumask_update_lock);
+ free_cpumask_var(tracing_cpumask);
return err;
}
@@ -3086,10 +3114,15 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
if (!iter)
return -ENOMEM;
+ if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
+ kfree(iter);
+ return -ENOMEM;
+ }
+
mutex_lock(&trace_types_lock);
/* trace pipe does not show start of buffer */
- cpus_setall(iter->started);
+ cpumask_setall(iter->started);
iter->tr = &global_trace;
iter->trace = current_trace;
@@ -3106,6 +3139,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
{
struct trace_iterator *iter = file->private_data;
+ free_cpumask_var(iter->started);
kfree(iter);
atomic_dec(&tracing_reader);
@@ -3724,7 +3758,6 @@ void ftrace_dump(void)
static DEFINE_SPINLOCK(ftrace_dump_lock);
/* use static because iter can be a bit big for the stack */
static struct trace_iterator iter;
- static cpumask_t mask;
static int dump_ran;
unsigned long flags;
int cnt = 0, cpu;
@@ -3758,8 +3791,6 @@ void ftrace_dump(void)
* and then release the locks again.
*/
- cpus_clear(mask);
-
while (!trace_empty(&iter)) {
if (!cnt)
@@ -3795,19 +3826,28 @@ __init static int tracer_alloc_buffers(void)
{
struct trace_array_cpu *data;
int i;
+ int ret = -ENOMEM;
- /* TODO: make the number of buffers hot pluggable with CPUS */
- tracing_buffer_mask = cpu_possible_map;
+ if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
+ goto out;
+
+ if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
+ goto out_free_buffer_mask;
+ cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
+ cpumask_copy(tracing_cpumask, cpu_all_mask);
+
+ /* TODO: make the number of buffers hot pluggable with CPUS */
global_trace.buffer = ring_buffer_alloc(trace_buf_size,
TRACE_BUFFER_FLAGS);
if (!global_trace.buffer) {
printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
WARN_ON(1);
- return 0;
+ goto out_free_cpumask;
}
global_trace.entries = ring_buffer_size(global_trace.buffer);
+
#ifdef CONFIG_TRACER_MAX_TRACE
max_tr.buffer = ring_buffer_alloc(trace_buf_size,
TRACE_BUFFER_FLAGS);
@@ -3815,7 +3855,7 @@ __init static int tracer_alloc_buffers(void)
printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
WARN_ON(1);
ring_buffer_free(global_trace.buffer);
- return 0;
+ goto out_free_cpumask;
}
max_tr.entries = ring_buffer_size(max_tr.buffer);
WARN_ON(max_tr.entries != global_trace.entries);
@@ -3845,8 +3885,14 @@ __init static int tracer_alloc_buffers(void)
&trace_panic_notifier);
register_die_notifier(&trace_die_notifier);
+ ret = 0;
- return 0;
+out_free_cpumask:
+ free_cpumask_var(tracing_cpumask);
+out_free_buffer_mask:
+ free_cpumask_var(tracing_buffer_mask);
+out:
+ return ret;
}
early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
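
The tracer_alloc_buffers() conversion above follows the cpumask_var_t discipline this patch applies to tracing_buffer_mask and tracing_cpumask: allocate the mask, copy the old static value into it, and free it on every error path. A minimal, illustrative sketch of that allocate/copy/use/free lifecycle (demo_snapshot_possible_cpus is a made-up name, not part of the patch):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Illustrative sketch only, not part of the patch. */
static int demo_snapshot_possible_cpus(void)
{
	cpumask_var_t snap;
	int weight;

	/* Only allocates real storage when CONFIG_CPUMASK_OFFSTACK=y. */
	if (!alloc_cpumask_var(&snap, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(snap, cpu_possible_mask);
	weight = cpumask_weight(snap);

	free_cpumask_var(snap);
	return weight;
}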
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 5ac697065a48..4d3d381bfd95 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -28,7 +28,7 @@ enum trace_type {
TRACE_GRAPH_RET,
TRACE_GRAPH_ENT,
TRACE_USER_STACK,
- TRACE_BTS,
+ TRACE_HW_BRANCHES,
TRACE_POWER,
__TRACE_LAST_TYPE
@@ -159,10 +159,10 @@ struct trace_branch {
char correct;
};
-struct bts_entry {
+struct hw_branch_entry {
struct trace_entry ent;
- unsigned long from;
- unsigned long to;
+ u64 from;
+ u64 to;
};
struct trace_power {
@@ -278,7 +278,7 @@ extern void __ftrace_bad_type(void);
TRACE_GRAPH_ENT); \
IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
TRACE_GRAPH_RET); \
- IF_ASSIGN(var, ent, struct bts_entry, TRACE_BTS);\
+ IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
__ftrace_bad_type(); \
} while (0)
@@ -368,12 +368,13 @@ struct trace_iterator {
loff_t pos;
long idx;
- cpumask_t started;
+ cpumask_var_t started;
};
int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
+void tracing_reset_online_cpus(struct trace_array *tr);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
@@ -414,9 +415,7 @@ void trace_function(struct trace_array *tr,
void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
-void trace_bts(struct trace_array *tr,
- unsigned long from,
- unsigned long to);
+void trace_hw_branch(struct trace_array *tr, u64 from, u64 to);
void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
@@ -580,7 +579,8 @@ enum trace_iterator_flags {
TRACE_ITER_BRANCH = 0x1000,
TRACE_ITER_ANNOTATE = 0x2000,
TRACE_ITER_USERSTACKTRACE = 0x4000,
- TRACE_ITER_SYM_USEROBJ = 0x8000
+ TRACE_ITER_SYM_USEROBJ = 0x8000,
+ TRACE_ITER_PRINTK_MSGONLY = 0x10000
};
/*
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index a4fa2c57e34e..366c8c333e13 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -37,22 +37,12 @@ void disable_boot_trace(void)
tracing_stop_sched_switch_record();
}
-static void reset_boot_trace(struct trace_array *tr)
-{
- int cpu;
-
- tr->time_start = ftrace_now(tr->cpu);
-
- for_each_online_cpu(cpu)
- tracing_reset(tr, cpu);
-}
-
static int boot_trace_init(struct trace_array *tr)
{
int cpu;
boot_trace = tr;
- for_each_cpu_mask(cpu, cpu_possible_map)
+ for_each_cpu(cpu, cpu_possible_mask)
tracing_reset(tr, cpu);
tracing_sched_switch_assign_trace(tr);
@@ -130,7 +120,7 @@ struct tracer boot_tracer __read_mostly =
{
.name = "initcall",
.init = boot_trace_init,
- .reset = reset_boot_trace,
+ .reset = tracing_reset_online_cpus,
.print_line = initcall_print_line,
};
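
reset_boot_trace() above, like function_reset(), sched_switch_reset() and stack_reset() removed later in this patch, is replaced by the shared tracing_reset_online_cpus() helper declared in trace.h. The helper body is added in kernel/trace/trace.c outside the hunks shown here; inferred from the identical per-tracer functions it replaces, it presumably looks like this sketch:

#include "trace.h"

/* Sketch of the consolidated reset helper, inferred from the removed
 * per-tracer reset functions; the real implementation lives in trace.c. */
void tracing_reset_online_cpus(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);
}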
diff --git a/kernel/trace/trace_bts.c b/kernel/trace/trace_bts.c
deleted file mode 100644
index 23b76e4690ef..000000000000
--- a/kernel/trace/trace_bts.c
+++ /dev/null
@@ -1,276 +0,0 @@
-/*
- * BTS tracer
- *
- * Copyright (C) 2008 Markus Metzger <markus.t.metzger@gmail.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/debugfs.h>
-#include <linux/ftrace.h>
-#include <linux/kallsyms.h>
-
-#include <asm/ds.h>
-
-#include "trace.h"
-
-
-#define SIZEOF_BTS (1 << 13)
-
-static DEFINE_PER_CPU(struct bts_tracer *, tracer);
-static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
-
-#define this_tracer per_cpu(tracer, smp_processor_id())
-#define this_buffer per_cpu(buffer, smp_processor_id())
-
-
-/*
- * Information to interpret a BTS record.
- * This will go into an in-kernel BTS interface.
- */
-static unsigned char sizeof_field;
-static unsigned long debugctl_mask;
-
-#define sizeof_bts (3 * sizeof_field)
-
-static void bts_trace_cpuinit(struct cpuinfo_x86 *c)
-{
- switch (c->x86) {
- case 0x6:
- switch (c->x86_model) {
- case 0x0 ... 0xC:
- break;
- case 0xD:
- case 0xE: /* Pentium M */
- sizeof_field = sizeof(long);
- debugctl_mask = (1<<6)|(1<<7);
- break;
- default:
- sizeof_field = 8;
- debugctl_mask = (1<<6)|(1<<7);
- break;
- }
- break;
- case 0xF:
- switch (c->x86_model) {
- case 0x0:
- case 0x1:
- case 0x2: /* Netburst */
- sizeof_field = sizeof(long);
- debugctl_mask = (1<<2)|(1<<3);
- break;
- default:
- /* sorry, don't know about them */
- break;
- }
- break;
- default:
- /* sorry, don't know about them */
- break;
- }
-}
-
-static inline void bts_enable(void)
-{
- unsigned long debugctl;
-
- rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
- wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl | debugctl_mask);
-}
-
-static inline void bts_disable(void)
-{
- unsigned long debugctl;
-
- rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
- wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl & ~debugctl_mask);
-}
-
-static void bts_trace_reset(struct trace_array *tr)
-{
- int cpu;
-
- tr->time_start = ftrace_now(tr->cpu);
-
- for_each_online_cpu(cpu)
- tracing_reset(tr, cpu);
-}
-
-static void bts_trace_start_cpu(void *arg)
-{
- this_tracer =
- ds_request_bts(/* task = */ NULL, this_buffer, SIZEOF_BTS,
- /* ovfl = */ NULL, /* th = */ (size_t)-1);
- if (IS_ERR(this_tracer)) {
- this_tracer = NULL;
- return;
- }
-
- bts_enable();
-}
-
-static void bts_trace_start(struct trace_array *tr)
-{
- int cpu;
-
- bts_trace_reset(tr);
-
- for_each_cpu_mask(cpu, cpu_possible_map)
- smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
-}
-
-static void bts_trace_stop_cpu(void *arg)
-{
- if (this_tracer) {
- bts_disable();
-
- ds_release_bts(this_tracer);
- this_tracer = NULL;
- }
-}
-
-static void bts_trace_stop(struct trace_array *tr)
-{
- int cpu;
-
- for_each_cpu_mask(cpu, cpu_possible_map)
- smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
-}
-
-static int bts_trace_init(struct trace_array *tr)
-{
- bts_trace_cpuinit(&boot_cpu_data);
- bts_trace_reset(tr);
- bts_trace_start(tr);
-
- return 0;
-}
-
-static void bts_trace_print_header(struct seq_file *m)
-{
-#ifdef __i386__
- seq_puts(m, "# CPU# FROM TO FUNCTION\n");
- seq_puts(m, "# | | | |\n");
-#else
- seq_puts(m,
- "# CPU# FROM TO FUNCTION\n");
- seq_puts(m,
- "# | | | |\n");
-#endif
-}
-
-static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
-{
- struct trace_entry *entry = iter->ent;
- struct trace_seq *seq = &iter->seq;
- struct bts_entry *it;
-
- trace_assign_type(it, entry);
-
- if (entry->type == TRACE_BTS) {
- int ret;
-#ifdef CONFIG_KALLSYMS
- char function[KSYM_SYMBOL_LEN];
- sprint_symbol(function, it->from);
-#else
- char *function = "<unknown>";
-#endif
-
- ret = trace_seq_printf(seq, "%4d 0x%lx -> 0x%lx [%s]\n",
- entry->cpu, it->from, it->to, function);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;;
- return TRACE_TYPE_HANDLED;
- }
- return TRACE_TYPE_UNHANDLED;
-}
-
-void trace_bts(struct trace_array *tr, unsigned long from, unsigned long to)
-{
- struct ring_buffer_event *event;
- struct bts_entry *entry;
- unsigned long irq;
-
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq);
- if (!event)
- return;
- entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, 0, from);
- entry->ent.type = TRACE_BTS;
- entry->ent.cpu = smp_processor_id();
- entry->from = from;
- entry->to = to;
- ring_buffer_unlock_commit(tr->buffer, event, irq);
-}
-
-static void trace_bts_at(struct trace_array *tr, size_t index)
-{
- const void *raw = NULL;
- unsigned long from, to;
- int err;
-
- err = ds_access_bts(this_tracer, index, &raw);
- if (err < 0)
- return;
-
- from = *(const unsigned long *)raw;
- to = *(const unsigned long *)((const char *)raw + sizeof_field);
-
- trace_bts(tr, from, to);
-}
-
-static void trace_bts_cpu(void *arg)
-{
- struct trace_array *tr = (struct trace_array *) arg;
- size_t index = 0, end = 0, i;
- int err;
-
- if (!this_tracer)
- return;
-
- bts_disable();
-
- err = ds_get_bts_index(this_tracer, &index);
- if (err < 0)
- goto out;
-
- err = ds_get_bts_end(this_tracer, &end);
- if (err < 0)
- goto out;
-
- for (i = index; i < end; i++)
- trace_bts_at(tr, i);
-
- for (i = 0; i < index; i++)
- trace_bts_at(tr, i);
-
-out:
- bts_enable();
-}
-
-static void trace_bts_prepare(struct trace_iterator *iter)
-{
- int cpu;
-
- for_each_cpu_mask(cpu, cpu_possible_map)
- smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
-}
-
-struct tracer bts_tracer __read_mostly =
-{
- .name = "bts",
- .init = bts_trace_init,
- .reset = bts_trace_stop,
- .print_header = bts_trace_print_header,
- .print_line = bts_trace_print_line,
- .start = bts_trace_start,
- .stop = bts_trace_stop,
- .open = trace_bts_prepare
-};
-
-__init static int init_bts_trace(void)
-{
- return register_tracer(&bts_tracer);
-}
-device_initcall(init_bts_trace);
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index e74f6d0a3216..9236d7e25a16 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -16,20 +16,10 @@
#include "trace.h"
-static void function_reset(struct trace_array *tr)
-{
- int cpu;
-
- tr->time_start = ftrace_now(tr->cpu);
-
- for_each_online_cpu(cpu)
- tracing_reset(tr, cpu);
-}
-
static void start_function_trace(struct trace_array *tr)
{
tr->cpu = get_cpu();
- function_reset(tr);
+ tracing_reset_online_cpus(tr);
put_cpu();
tracing_start_cmdline_record();
@@ -55,7 +45,7 @@ static void function_trace_reset(struct trace_array *tr)
static void function_trace_start(struct trace_array *tr)
{
- function_reset(tr);
+ tracing_reset_online_cpus(tr);
}
static struct tracer function_trace __read_mostly =
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index af60eef4cbcc..930c08e5b38e 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -79,7 +79,7 @@ print_graph_cpu(struct trace_seq *s, int cpu)
int i;
int ret;
int log10_this = log10_cpu(cpu);
- int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map));
+ int log10_all = log10_cpu(cpumask_weight(cpu_online_mask));
/*
@@ -231,6 +231,49 @@ trace_branch_is_leaf(struct trace_iterator *iter,
return true;
}
+static enum print_line_t
+print_graph_irq(struct trace_seq *s, unsigned long addr,
+ enum trace_type type, int cpu, pid_t pid)
+{
+ int ret;
+
+ if (addr < (unsigned long)__irqentry_text_start ||
+ addr >= (unsigned long)__irqentry_text_end)
+ return TRACE_TYPE_UNHANDLED;
+
+ if (type == TRACE_GRAPH_ENT) {
+ ret = trace_seq_printf(s, "==========> | ");
+ } else {
+ /* Cpu */
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+ ret = print_graph_cpu(s, cpu);
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
+ /* Proc */
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
+ ret = print_graph_proc(s, pid);
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ ret = trace_seq_printf(s, " | ");
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
+
+ /* No overhead */
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+ ret = trace_seq_printf(s, " ");
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
+
+ ret = trace_seq_printf(s, "<========== |\n");
+ }
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ return TRACE_TYPE_HANDLED;
+}
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
@@ -344,7 +387,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
static enum print_line_t
print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
- struct trace_seq *s)
+ struct trace_seq *s, pid_t pid, int cpu)
{
int i;
int ret;
@@ -357,8 +400,18 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
return TRACE_TYPE_PARTIAL_LINE;
}
- /* No time */
- ret = trace_seq_printf(s, " | ");
+ /* Interrupt */
+ ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, pid);
+ if (ret == TRACE_TYPE_UNHANDLED) {
+ /* No time */
+ ret = trace_seq_printf(s, " | ");
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ } else {
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
+
/* Function */
for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
@@ -410,7 +463,7 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
if (trace_branch_is_leaf(iter, field))
return print_graph_entry_leaf(iter, field, s);
else
- return print_graph_entry_nested(field, s);
+ return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
}
@@ -474,6 +527,11 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
+
+ ret = print_graph_irq(s, trace->func, TRACE_GRAPH_RET, cpu, ent->pid);
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
+ return TRACE_TYPE_PARTIAL_LINE;
+
return TRACE_TYPE_HANDLED;
}
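
print_graph_irq() above decides whether an address belongs to interrupt entry code by comparing it against the linker-provided __irqentry_text_start/__irqentry_text_end symbols. A small isolated sketch of that section-range test (the helper name is made up for illustration):

#include <linux/types.h>

/* Linker-script symbols bounding the irq entry text section. */
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];

/* Illustrative helper: is @addr inside the irq entry section? */
static inline bool demo_in_irqentry_text(unsigned long addr)
{
	return addr >= (unsigned long)__irqentry_text_start &&
	       addr <  (unsigned long)__irqentry_text_end;
}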
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
new file mode 100644
index 000000000000..649df22d435f
--- /dev/null
+++ b/kernel/trace/trace_hw_branches.c
@@ -0,0 +1,195 @@
+/*
+ * h/w branch tracer for x86 based on bts
+ *
+ * Copyright (C) 2008 Markus Metzger <markus.t.metzger@gmail.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/ftrace.h>
+#include <linux/kallsyms.h>
+
+#include <asm/ds.h>
+
+#include "trace.h"
+
+
+#define SIZEOF_BTS (1 << 13)
+
+static DEFINE_PER_CPU(struct bts_tracer *, tracer);
+static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
+
+#define this_tracer per_cpu(tracer, smp_processor_id())
+#define this_buffer per_cpu(buffer, smp_processor_id())
+
+
+static void bts_trace_start_cpu(void *arg)
+{
+ if (this_tracer)
+ ds_release_bts(this_tracer);
+
+ this_tracer =
+ ds_request_bts(/* task = */ NULL, this_buffer, SIZEOF_BTS,
+ /* ovfl = */ NULL, /* th = */ (size_t)-1,
+ BTS_KERNEL);
+ if (IS_ERR(this_tracer)) {
+ this_tracer = NULL;
+ return;
+ }
+}
+
+static void bts_trace_start(struct trace_array *tr)
+{
+ int cpu;
+
+ tracing_reset_online_cpus(tr);
+
+ for_each_cpu(cpu, cpu_possible_mask)
+ smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
+}
+
+static void bts_trace_stop_cpu(void *arg)
+{
+ if (this_tracer) {
+ ds_release_bts(this_tracer);
+ this_tracer = NULL;
+ }
+}
+
+static void bts_trace_stop(struct trace_array *tr)
+{
+ int cpu;
+
+ for_each_cpu(cpu, cpu_possible_mask)
+ smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
+}
+
+static int bts_trace_init(struct trace_array *tr)
+{
+ tracing_reset_online_cpus(tr);
+ bts_trace_start(tr);
+
+ return 0;
+}
+
+static void bts_trace_print_header(struct seq_file *m)
+{
+ seq_puts(m,
+ "# CPU# FROM TO FUNCTION\n");
+ seq_puts(m,
+ "# | | | |\n");
+}
+
+static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
+{
+ struct trace_entry *entry = iter->ent;
+ struct trace_seq *seq = &iter->seq;
+ struct hw_branch_entry *it;
+
+ trace_assign_type(it, entry);
+
+ if (entry->type == TRACE_HW_BRANCHES) {
+ if (trace_seq_printf(seq, "%4d ", entry->cpu) &&
+ trace_seq_printf(seq, "0x%016llx -> 0x%016llx ",
+ it->from, it->to) &&
+ (!it->from ||
+ seq_print_ip_sym(seq, it->from, /* sym_flags = */ 0)) &&
+ trace_seq_printf(seq, "\n"))
+ return TRACE_TYPE_HANDLED;
+		return TRACE_TYPE_PARTIAL_LINE;
+ }
+ return TRACE_TYPE_UNHANDLED;
+}
+
+void trace_hw_branch(struct trace_array *tr, u64 from, u64 to)
+{
+ struct ring_buffer_event *event;
+ struct hw_branch_entry *entry;
+ unsigned long irq;
+
+ event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq);
+ if (!event)
+ return;
+ entry = ring_buffer_event_data(event);
+ tracing_generic_entry_update(&entry->ent, 0, from);
+ entry->ent.type = TRACE_HW_BRANCHES;
+ entry->ent.cpu = smp_processor_id();
+ entry->from = from;
+ entry->to = to;
+ ring_buffer_unlock_commit(tr->buffer, event, irq);
+}
+
+static void trace_bts_at(struct trace_array *tr,
+ const struct bts_trace *trace, void *at)
+{
+ struct bts_struct bts;
+ int err = 0;
+
+ WARN_ON_ONCE(!trace->read);
+ if (!trace->read)
+ return;
+
+ err = trace->read(this_tracer, at, &bts);
+ if (err < 0)
+ return;
+
+ switch (bts.qualifier) {
+ case BTS_BRANCH:
+ trace_hw_branch(tr, bts.variant.lbr.from, bts.variant.lbr.to);
+ break;
+ }
+}
+
+static void trace_bts_cpu(void *arg)
+{
+ struct trace_array *tr = (struct trace_array *) arg;
+ const struct bts_trace *trace;
+ unsigned char *at;
+
+ if (!this_tracer)
+ return;
+
+ ds_suspend_bts(this_tracer);
+ trace = ds_read_bts(this_tracer);
+ if (!trace)
+ goto out;
+
+ for (at = trace->ds.top; (void *)at < trace->ds.end;
+ at += trace->ds.size)
+ trace_bts_at(tr, trace, at);
+
+ for (at = trace->ds.begin; (void *)at < trace->ds.top;
+ at += trace->ds.size)
+ trace_bts_at(tr, trace, at);
+
+out:
+ ds_resume_bts(this_tracer);
+}
+
+static void trace_bts_prepare(struct trace_iterator *iter)
+{
+ int cpu;
+
+ for_each_cpu(cpu, cpu_possible_mask)
+ smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
+}
+
+struct tracer bts_tracer __read_mostly =
+{
+ .name = "hw-branch-tracer",
+ .init = bts_trace_init,
+ .reset = bts_trace_stop,
+ .print_header = bts_trace_print_header,
+ .print_line = bts_trace_print_line,
+ .start = bts_trace_start,
+ .stop = bts_trace_stop,
+ .open = trace_bts_prepare
+};
+
+__init static int init_bts_trace(void)
+{
+ return register_tracer(&bts_tracer);
+}
+device_initcall(init_bts_trace);
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 2fb6da6523b3..fffcb069f1dc 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -22,14 +22,10 @@ static unsigned long prev_overruns;
static void mmio_reset_data(struct trace_array *tr)
{
- int cpu;
-
overrun_detected = false;
prev_overruns = 0;
- tr->time_start = ftrace_now(tr->cpu);
- for_each_online_cpu(cpu)
- tracing_reset(tr, cpu);
+ tracing_reset_online_cpus(tr);
}
static int mmio_trace_init(struct trace_array *tr)
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index a7172a352f62..7bda248daf55 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -39,7 +39,7 @@ static int power_trace_init(struct trace_array *tr)
trace_power_enabled = 1;
- for_each_cpu_mask(cpu, cpu_possible_map)
+ for_each_cpu(cpu, cpu_possible_mask)
tracing_reset(tr, cpu);
return 0;
}
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 863390557b44..df175cb4564f 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -49,7 +49,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
}
static void
-probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
+probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
{
struct trace_array_cpu *data;
unsigned long flags;
@@ -72,16 +72,6 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
local_irq_restore(flags);
}
-static void sched_switch_reset(struct trace_array *tr)
-{
- int cpu;
-
- tr->time_start = ftrace_now(tr->cpu);
-
- for_each_online_cpu(cpu)
- tracing_reset(tr, cpu);
-}
-
static int tracing_sched_register(void)
{
int ret;
@@ -197,7 +187,7 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr)
static void start_sched_trace(struct trace_array *tr)
{
- sched_switch_reset(tr);
+ tracing_reset_online_cpus(tr);
tracing_start_sched_switch_record();
}
@@ -221,7 +211,7 @@ static void sched_switch_trace_reset(struct trace_array *tr)
static void sched_switch_trace_start(struct trace_array *tr)
{
- sched_switch_reset(tr);
+ tracing_reset_online_cpus(tr);
tracing_start_sched_switch();
}
@@ -247,3 +237,4 @@ __init static int init_sched_switch_trace(void)
return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);
+
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 0067b49746c1..43586b689e31 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -211,7 +211,7 @@ static void wakeup_reset(struct trace_array *tr)
}
static void
-probe_wakeup(struct rq *rq, struct task_struct *p)
+probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
int cpu = smp_processor_id();
unsigned long flags;
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 0b863f2cbc8e..d0871bc0aca5 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -10,6 +10,7 @@
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
+#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>
#include "trace.h"
@@ -31,6 +32,10 @@ static raw_spinlock_t max_stack_lock =
static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
+static DEFINE_MUTEX(stack_sysctl_mutex);
+
+int stack_tracer_enabled;
+static int last_stack_tracer_enabled;
static inline void check_stack(void)
{
@@ -174,7 +179,7 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
return count;
}
-static struct file_operations stack_max_size_fops = {
+static const struct file_operations stack_max_size_fops = {
.open = tracing_open_generic,
.read = stack_max_size_read,
.write = stack_max_size_write,
@@ -272,7 +277,7 @@ static int t_show(struct seq_file *m, void *v)
return 0;
}
-static struct seq_operations stack_trace_seq_ops = {
+static const struct seq_operations stack_trace_seq_ops = {
.start = t_start,
.next = t_next,
.stop = t_stop,
@@ -288,12 +293,47 @@ static int stack_trace_open(struct inode *inode, struct file *file)
return ret;
}
-static struct file_operations stack_trace_fops = {
+static const struct file_operations stack_trace_fops = {
.open = stack_trace_open,
.read = seq_read,
.llseek = seq_lseek,
};
+int
+stack_trace_sysctl(struct ctl_table *table, int write,
+ struct file *file, void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret;
+
+ mutex_lock(&stack_sysctl_mutex);
+
+ ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
+
+ if (ret || !write ||
+ (last_stack_tracer_enabled == stack_tracer_enabled))
+ goto out;
+
+ last_stack_tracer_enabled = stack_tracer_enabled;
+
+ if (stack_tracer_enabled)
+ register_ftrace_function(&trace_ops);
+ else
+ unregister_ftrace_function(&trace_ops);
+
+ out:
+ mutex_unlock(&stack_sysctl_mutex);
+ return ret;
+}
+
+static __init int enable_stacktrace(char *str)
+{
+ stack_tracer_enabled = 1;
+ last_stack_tracer_enabled = 1;
+ return 1;
+}
+__setup("stacktrace", enable_stacktrace);
+
static __init int stack_trace_init(void)
{
struct dentry *d_tracer;
@@ -311,7 +351,8 @@ static __init int stack_trace_init(void)
if (!entry)
pr_warning("Could not create debugfs 'stack_trace' entry\n");
- register_ftrace_function(&trace_ops);
+ if (stack_tracer_enabled)
+ register_ftrace_function(&trace_ops);
return 0;
}
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 54960edb96d0..eaca5ad803ff 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -196,27 +196,19 @@ static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
return HRTIMER_RESTART;
}
-static void start_stack_timer(int cpu)
+static void start_stack_timer(void *unused)
{
- struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);
+ struct hrtimer *hrtimer = &__get_cpu_var(stack_trace_hrtimer);
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = stack_trace_timer_fn;
- hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
}
static void start_stack_timers(void)
{
- cpumask_t saved_mask = current->cpus_allowed;
- int cpu;
-
- for_each_online_cpu(cpu) {
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
- start_stack_timer(cpu);
- }
- set_cpus_allowed_ptr(current, &saved_mask);
+ on_each_cpu(start_stack_timer, NULL, 1);
}
static void stop_stack_timer(int cpu)
@@ -234,20 +226,10 @@ static void stop_stack_timers(void)
stop_stack_timer(cpu);
}
-static void stack_reset(struct trace_array *tr)
-{
- int cpu;
-
- tr->time_start = ftrace_now(tr->cpu);
-
- for_each_online_cpu(cpu)
- tracing_reset(tr, cpu);
-}
-
static void start_stack_trace(struct trace_array *tr)
{
mutex_lock(&sample_timer_lock);
- stack_reset(tr);
+ tracing_reset_online_cpus(tr);
start_stack_timers();
tracer_enabled = 1;
mutex_unlock(&sample_timer_lock);
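
start_stack_timers() above drops the set_cpus_allowed_ptr() migration loop in favour of on_each_cpu(), which runs the callback on every online CPU and optionally waits for completion. A generic sketch of that pattern, with illustrative names only:

#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(int, demo_ready);

/* Runs on remote CPUs from IPI context, and locally with IRQs disabled. */
static void demo_init_cpu(void *unused)
{
	__get_cpu_var(demo_ready) = 1;
}

static void demo_init_all_cpus(void)
{
	/* Third argument: wait until every CPU has finished the callback. */
	on_each_cpu(demo_init_cpu, NULL, 1);
}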
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 8ebcd8532dfb..2dc06ab35716 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -27,6 +27,7 @@
*/
void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
{
+ const struct cred *tcred;
struct timespec uptime, ts;
u64 ac_etime;
@@ -53,10 +54,11 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
stats->ac_flag |= AXSIG;
stats->ac_nice = task_nice(tsk);
stats->ac_sched = tsk->policy;
- stats->ac_uid = tsk->uid;
- stats->ac_gid = tsk->gid;
stats->ac_pid = tsk->pid;
rcu_read_lock();
+ tcred = __task_cred(tsk);
+ stats->ac_uid = tcred->uid;
+ stats->ac_gid = tcred->gid;
stats->ac_ppid = pid_alive(tsk) ?
rcu_dereference(tsk->real_parent)->tgid : 0;
rcu_read_unlock();
diff --git a/kernel/uid16.c b/kernel/uid16.c
index 3e41c1673e2f..2460c3199b5a 100644
--- a/kernel/uid16.c
+++ b/kernel/uid16.c
@@ -84,11 +84,12 @@ asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid)
asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid, old_uid_t __user *suid)
{
+ const struct cred *cred = current_cred();
int retval;
- if (!(retval = put_user(high2lowuid(current->uid), ruid)) &&
- !(retval = put_user(high2lowuid(current->euid), euid)))
- retval = put_user(high2lowuid(current->suid), suid);
+ if (!(retval = put_user(high2lowuid(cred->uid), ruid)) &&
+ !(retval = put_user(high2lowuid(cred->euid), euid)))
+ retval = put_user(high2lowuid(cred->suid), suid);
return retval;
}
@@ -104,11 +105,12 @@ asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid)
asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid, old_gid_t __user *sgid)
{
+ const struct cred *cred = current_cred();
int retval;
- if (!(retval = put_user(high2lowgid(current->gid), rgid)) &&
- !(retval = put_user(high2lowgid(current->egid), egid)))
- retval = put_user(high2lowgid(current->sgid), sgid);
+ if (!(retval = put_user(high2lowgid(cred->gid), rgid)) &&
+ !(retval = put_user(high2lowgid(cred->egid), egid)))
+ retval = put_user(high2lowgid(cred->sgid), sgid);
return retval;
}
@@ -161,25 +163,24 @@ static int groups16_from_user(struct group_info *group_info,
asmlinkage long sys_getgroups16(int gidsetsize, old_gid_t __user *grouplist)
{
- int i = 0;
+ const struct cred *cred = current_cred();
+ int i;
if (gidsetsize < 0)
return -EINVAL;
- get_group_info(current->group_info);
- i = current->group_info->ngroups;
+ i = cred->group_info->ngroups;
if (gidsetsize) {
if (i > gidsetsize) {
i = -EINVAL;
goto out;
}
- if (groups16_to_user(grouplist, current->group_info)) {
+ if (groups16_to_user(grouplist, cred->group_info)) {
i = -EFAULT;
goto out;
}
}
out:
- put_group_info(current->group_info);
return i;
}
@@ -210,20 +211,20 @@ asmlinkage long sys_setgroups16(int gidsetsize, old_gid_t __user *grouplist)
asmlinkage long sys_getuid16(void)
{
- return high2lowuid(current->uid);
+ return high2lowuid(current_uid());
}
asmlinkage long sys_geteuid16(void)
{
- return high2lowuid(current->euid);
+ return high2lowuid(current_euid());
}
asmlinkage long sys_getgid16(void)
{
- return high2lowgid(current->gid);
+ return high2lowgid(current_gid());
}
asmlinkage long sys_getegid16(void)
{
- return high2lowgid(current->egid);
+ return high2lowgid(current_egid());
}
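
The uid16 conversions above are part of the credentials rework: task IDs are no longer read straight off task_struct but through the task's const struct cred, via current_cred() and the current_uid()/current_gid() family of accessors. A hedged sketch of that read-only access pattern (demo_report_ids is illustrative, not from the patch):

#include <linux/cred.h>
#include <linux/kernel.h>

/* Illustrative only: read the calling task's IDs through its struct cred. */
static void demo_report_ids(void)
{
	const struct cred *cred = current_cred();	/* no locking needed for current */

	pr_info("uid=%u euid=%u gid=%u egid=%u\n",
		cred->uid, cred->euid, cred->gid, cred->egid);
}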
diff --git a/kernel/user.c b/kernel/user.c
index cec2224bc9f5..477b6660f447 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -16,12 +16,13 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
+#include "cred-internals.h"
struct user_namespace init_user_ns = {
.kref = {
- .refcount = ATOMIC_INIT(2),
+ .refcount = ATOMIC_INIT(1),
},
- .root_user = &root_user,
+ .creator = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);
@@ -47,12 +48,14 @@ static struct kmem_cache *uid_cachep;
*/
static DEFINE_SPINLOCK(uidhash_lock);
+/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
struct user_struct root_user = {
- .__count = ATOMIC_INIT(1),
+ .__count = ATOMIC_INIT(2),
.processes = ATOMIC_INIT(1),
.files = ATOMIC_INIT(0),
.sigpending = ATOMIC_INIT(0),
.locked_shm = 0,
+ .user_ns = &init_user_ns,
#ifdef CONFIG_USER_SCHED
.tg = &init_task_group,
#endif
@@ -106,16 +109,10 @@ static int sched_create_user(struct user_struct *up)
return rc;
}
-static void sched_switch_user(struct task_struct *p)
-{
- sched_move_task(p);
-}
-
#else /* CONFIG_USER_SCHED */
static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }
-static void sched_switch_user(struct task_struct *p) { }
#endif /* CONFIG_USER_SCHED */
@@ -244,13 +241,21 @@ static struct kobj_type uids_ktype = {
.release = uids_release,
};
-/* create /sys/kernel/uids/<uid>/cpu_share file for this user */
+/*
+ * Create /sys/kernel/uids/<uid>/cpu_share file for this user
+ * We do not create this file for users in a user namespace (until
+ * sysfs tagging is implemented).
+ *
+ * See Documentation/scheduler/sched-design-CFS.txt for ramifications.
+ */
static int uids_user_create(struct user_struct *up)
{
struct kobject *kobj = &up->kobj;
int error;
memset(kobj, 0, sizeof(struct kobject));
+ if (up->user_ns != &init_user_ns)
+ return 0;
kobj->kset = uids_kset;
error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
if (error) {
@@ -286,6 +291,8 @@ static void remove_user_sysfs_dir(struct work_struct *w)
unsigned long flags;
int remove_user = 0;
+ if (up->user_ns != &init_user_ns)
+ return;
/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
* atomic.
*/
@@ -321,12 +328,13 @@ done:
* IRQ state (as stored in flags) is restored and uidhash_lock released
* upon function exit.
*/
-static inline void free_user(struct user_struct *up, unsigned long flags)
+static void free_user(struct user_struct *up, unsigned long flags)
{
/* restore back the count */
atomic_inc(&up->__count);
spin_unlock_irqrestore(&uidhash_lock, flags);
+ put_user_ns(up->user_ns);
INIT_WORK(&up->work, remove_user_sysfs_dir);
schedule_work(&up->work);
}
@@ -342,13 +350,14 @@ static inline void uids_mutex_unlock(void) { }
* IRQ state (as stored in flags) is restored and uidhash_lock released
* upon function exit.
*/
-static inline void free_user(struct user_struct *up, unsigned long flags)
+static void free_user(struct user_struct *up, unsigned long flags)
{
uid_hash_remove(up);
spin_unlock_irqrestore(&uidhash_lock, flags);
sched_destroy_user(up);
key_put(up->uid_keyring);
key_put(up->session_keyring);
+ put_user_ns(up->user_ns);
kmem_cache_free(uid_cachep, up);
}
@@ -364,7 +373,7 @@ struct user_struct *find_user(uid_t uid)
{
struct user_struct *ret;
unsigned long flags;
- struct user_namespace *ns = current->nsproxy->user_ns;
+ struct user_namespace *ns = current_user_ns();
spin_lock_irqsave(&uidhash_lock, flags);
ret = uid_hash_find(uid, uidhashentry(ns, uid));
@@ -411,6 +420,8 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
if (sched_create_user(new) < 0)
goto out_free_user;
+ new->user_ns = get_user_ns(ns);
+
if (uids_user_create(new))
goto out_destoy_sched;
@@ -434,7 +445,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
up = new;
}
spin_unlock_irq(&uidhash_lock);
-
}
uids_mutex_unlock();
@@ -443,6 +453,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
out_destoy_sched:
sched_destroy_user(new);
+ put_user_ns(new->user_ns);
out_free_user:
kmem_cache_free(uid_cachep, new);
out_unlock:
@@ -450,63 +461,6 @@ out_unlock:
return NULL;
}
-void switch_uid(struct user_struct *new_user)
-{
- struct user_struct *old_user;
-
- /* What if a process setreuid()'s and this brings the
- * new uid over his NPROC rlimit? We can check this now
- * cheaply with the new uid cache, so if it matters
- * we should be checking for it. -DaveM
- */
- old_user = current->user;
- atomic_inc(&new_user->processes);
- atomic_dec(&old_user->processes);
- switch_uid_keyring(new_user);
- current->user = new_user;
- sched_switch_user(current);
-
- /*
- * We need to synchronize with __sigqueue_alloc()
- * doing a get_uid(p->user).. If that saw the old
- * user value, we need to wait until it has exited
- * its critical region before we can free the old
- * structure.
- */
- smp_mb();
- spin_unlock_wait(&current->sighand->siglock);
-
- free_uid(old_user);
- suid_keys(current);
-}
-
-#ifdef CONFIG_USER_NS
-void release_uids(struct user_namespace *ns)
-{
- int i;
- unsigned long flags;
- struct hlist_head *head;
- struct hlist_node *nd;
-
- spin_lock_irqsave(&uidhash_lock, flags);
- /*
- * collapse the chains so that the user_struct-s will
- * be still alive, but not in hashes. subsequent free_uid()
- * will free them.
- */
- for (i = 0; i < UIDHASH_SZ; i++) {
- head = ns->uidhash_table + i;
- while (!hlist_empty(head)) {
- nd = head->first;
- hlist_del_init(nd);
- }
- }
- spin_unlock_irqrestore(&uidhash_lock, flags);
-
- free_uid(ns->root_user);
-}
-#endif
-
static int __init uid_cache_init(void)
{
int n;
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 532858fa5b88..79084311ee57 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -9,60 +9,55 @@
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/user_namespace.h>
+#include <linux/cred.h>
/*
- * Clone a new ns copying an original user ns, setting refcount to 1
- * @old_ns: namespace to clone
- * Return NULL on error (failure to kmalloc), new ns otherwise
+ * Create a new user namespace, deriving the creator from the user in the
+ * passed credentials, and replacing that user with the new root user for the
+ * new namespace.
+ *
+ * This is called by copy_creds(), which will finish setting the target task's
+ * credentials.
*/
-static struct user_namespace *clone_user_ns(struct user_namespace *old_ns)
+int create_user_ns(struct cred *new)
{
struct user_namespace *ns;
- struct user_struct *new_user;
+ struct user_struct *root_user;
int n;
ns = kmalloc(sizeof(struct user_namespace), GFP_KERNEL);
if (!ns)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
kref_init(&ns->kref);
for (n = 0; n < UIDHASH_SZ; ++n)
INIT_HLIST_HEAD(ns->uidhash_table + n);
- /* Insert new root user. */
- ns->root_user = alloc_uid(ns, 0);
- if (!ns->root_user) {
+ /* Alloc new root user. */
+ root_user = alloc_uid(ns, 0);
+ if (!root_user) {
kfree(ns);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
- /* Reset current->user with a new one */
- new_user = alloc_uid(ns, current->uid);
- if (!new_user) {
- free_uid(ns->root_user);
- kfree(ns);
- return ERR_PTR(-ENOMEM);
- }
-
- switch_uid(new_user);
- return ns;
-}
-
-struct user_namespace * copy_user_ns(int flags, struct user_namespace *old_ns)
-{
- struct user_namespace *new_ns;
-
- BUG_ON(!old_ns);
- get_user_ns(old_ns);
-
- if (!(flags & CLONE_NEWUSER))
- return old_ns;
+ /* set the new root user in the credentials under preparation */
+ ns->creator = new->user;
+ new->user = root_user;
+ new->uid = new->euid = new->suid = new->fsuid = 0;
+ new->gid = new->egid = new->sgid = new->fsgid = 0;
+ put_group_info(new->group_info);
+ new->group_info = get_group_info(&init_groups);
+#ifdef CONFIG_KEYS
+ key_put(new->request_key_auth);
+ new->request_key_auth = NULL;
+#endif
+	/* tgcred will be cleared in our caller because CLONE_THREAD won't be set */
- new_ns = clone_user_ns(old_ns);
+ /* alloc_uid() incremented the userns refcount. Just set it to 1 */
+ kref_set(&ns->kref, 1);
- put_user_ns(old_ns);
- return new_ns;
+ return 0;
}
void free_user_ns(struct kref *kref)
@@ -70,7 +65,7 @@ void free_user_ns(struct kref *kref)
struct user_namespace *ns;
ns = container_of(kref, struct user_namespace, kref);
- release_uids(ns);
+ free_uid(ns->creator);
kfree(ns);
}
EXPORT_SYMBOL(free_user_ns);
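
The comment on create_user_ns() above says it is now invoked from copy_creds() rather than through the removed copy_user_ns() path. A simplified sketch of what that caller-side hook is expected to look like (an assumption about kernel/cred.c, not part of this diff):

#include <linux/cred.h>
#include <linux/sched.h>

/* Sketch of the expected caller: copy_creds() asks for a new user namespace
 * on the child's prepared credentials when CLONE_NEWUSER is requested. */
static int demo_copy_creds_ns(struct cred *new, unsigned long clone_flags)
{
	if (clone_flags & CLONE_NEWUSER)
		return create_user_ns(new);

	return 0;
}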
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d4dc69ddebd7..2f445833ae37 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -73,7 +73,7 @@ static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static int singlethread_cpu __read_mostly;
-static cpumask_t cpu_singlethread_map __read_mostly;
+static const struct cpumask *cpu_singlethread_map __read_mostly;
/*
* _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
* flushes cwq->worklist. This means that flush_workqueue/wait_on_work
@@ -81,24 +81,24 @@ static cpumask_t cpu_singlethread_map __read_mostly;
* use cpu_possible_map, the cpumask below is more a documentation
* than optimization.
*/
-static cpumask_t cpu_populated_map __read_mostly;
+static cpumask_var_t cpu_populated_map __read_mostly;
/* If it's single threaded, it isn't in the list of workqueues. */
-static inline int is_single_threaded(struct workqueue_struct *wq)
+static inline int is_wq_single_threaded(struct workqueue_struct *wq)
{
return wq->singlethread;
}
-static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
+static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
{
- return is_single_threaded(wq)
- ? &cpu_singlethread_map : &cpu_populated_map;
+ return is_wq_single_threaded(wq)
+ ? cpu_singlethread_map : cpu_populated_map;
}
static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
- if (unlikely(is_single_threaded(wq)))
+ if (unlikely(is_wq_single_threaded(wq)))
cpu = singlethread_cpu;
return per_cpu_ptr(wq->cpu_wq, cpu);
}
@@ -410,7 +410,7 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
*/
void flush_workqueue(struct workqueue_struct *wq)
{
- const cpumask_t *cpu_map = wq_cpu_map(wq);
+ const struct cpumask *cpu_map = wq_cpu_map(wq);
int cpu;
might_sleep();
@@ -532,7 +532,7 @@ static void wait_on_work(struct work_struct *work)
{
struct cpu_workqueue_struct *cwq;
struct workqueue_struct *wq;
- const cpumask_t *cpu_map;
+ const struct cpumask *cpu_map;
int cpu;
might_sleep();
@@ -769,7 +769,7 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
struct workqueue_struct *wq = cwq->wq;
- const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
+ const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
struct task_struct *p;
p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
@@ -903,7 +903,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
*/
void destroy_workqueue(struct workqueue_struct *wq)
{
- const cpumask_t *cpu_map = wq_cpu_map(wq);
+ const struct cpumask *cpu_map = wq_cpu_map(wq);
int cpu;
cpu_maps_update_begin();
@@ -933,7 +933,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
switch (action) {
case CPU_UP_PREPARE:
- cpu_set(cpu, cpu_populated_map);
+ cpumask_set_cpu(cpu, cpu_populated_map);
}
undo:
list_for_each_entry(wq, &workqueues, list) {
@@ -964,7 +964,7 @@ undo:
switch (action) {
case CPU_UP_CANCELED:
case CPU_POST_DEAD:
- cpu_clear(cpu, cpu_populated_map);
+ cpumask_clear_cpu(cpu, cpu_populated_map);
}
return ret;
@@ -1017,9 +1017,11 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
void __init init_workqueues(void)
{
- cpu_populated_map = cpu_online_map;
- singlethread_cpu = first_cpu(cpu_possible_map);
- cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
+ alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);
+
+ cpumask_copy(cpu_populated_map, cpu_online_mask);
+ singlethread_cpu = cpumask_first(cpu_possible_mask);
+ cpu_singlethread_map = cpumask_of(singlethread_cpu);
hotcpu_notifier(workqueue_cpu_callback, 0);
keventd_wq = create_workqueue("events");
BUG_ON(!keventd_wq);
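
init_workqueues() now allocates cpu_populated_map as a cpumask_var_t and points cpu_singlethread_map at the shared constant mask returned by cpumask_of(). Callers such as flush_workqueue() and wait_on_work() only ever read these masks through const struct cpumask *, as in this illustrative sketch (demo_count_cpus is a made-up name):

#include <linux/cpumask.h>

/* Illustrative only: consume a mask read-only, the way the converted
 * callers walk wq_cpu_map(wq). */
static unsigned int demo_count_cpus(const struct cpumask *map)
{
	unsigned int n = 0;
	int cpu;

	for_each_cpu(cpu, map)
		n++;

	return n;	/* equivalent to cpumask_weight(map) */
}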