path: root/kernel/trace/trace.c
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c  64
1 file changed, 22 insertions, 42 deletions
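
This diff replaces the dynamically tracked max_tracer_type_len with the fixed MAX_TRACER_SIZE bound (renamed from BOOTUP_TRACER_SIZE), rejecting over-long tracer names at registration time instead of tracking the longest registered name. It also fixes an off-by-one in trace_get_user() so the parser buffer keeps room for its terminating NUL, makes trace_vprintk() forward to trace_array_vprintk() rather than the variadic trace_array_printk(), constifies two seq_operations tables, and folds alloc_cpumask_var() + cpumask_clear() pairs into zalloc_cpumask_var().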
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 420232a1fbba..c820b0310a12 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -125,13 +125,13 @@ int ftrace_dump_on_oops;
static int tracing_set_tracer(const char *buf);
-#define BOOTUP_TRACER_SIZE 100
-static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata;
+#define MAX_TRACER_SIZE 100
+static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;
static int __init set_ftrace(char *str)
{
- strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
+ strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
default_bootup_tracer = bootup_tracer_buf;
/* We are using ftrace early, expand it */
ring_buffer_expanded = 1;
@@ -242,13 +242,6 @@ static struct tracer *trace_types __read_mostly;
static struct tracer *current_trace __read_mostly;
/*
- * max_tracer_type_len is used to simplify the allocating of
- * buffers to read userspace tracer names. We keep track of
- * the longest tracer name registered.
- */
-static int max_tracer_type_len;
-
-/*
* trace_types_lock is used to protect the trace_types list.
* This lock is also used to keep user access serialized.
* Accesses from userspace will grab this lock while userspace
@@ -422,7 +415,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
/* read the non-space input */
while (cnt && !isspace(ch)) {
- if (parser->idx < parser->size)
+ if (parser->idx < parser->size - 1)
parser->buffer[parser->idx++] = ch;
else {
ret = -EINVAL;
@@ -625,7 +618,6 @@ __releases(kernel_lock)
__acquires(kernel_lock)
{
struct tracer *t;
- int len;
int ret = 0;
if (!type->name) {
@@ -633,6 +625,11 @@ __acquires(kernel_lock)
return -1;
}
+ if (strlen(type->name) > MAX_TRACER_SIZE) {
+ pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
+ return -1;
+ }
+
/*
* When this gets called we hold the BKL which means that
* preemption is disabled. Various trace selftests however
@@ -647,7 +644,7 @@ __acquires(kernel_lock)
for (t = trace_types; t; t = t->next) {
if (strcmp(type->name, t->name) == 0) {
/* already found */
- pr_info("Trace %s already registered\n",
+ pr_info("Tracer %s already registered\n",
type->name);
ret = -1;
goto out;
@@ -698,9 +695,6 @@ __acquires(kernel_lock)
type->next = trace_types;
trace_types = type;
- len = strlen(type->name);
- if (len > max_tracer_type_len)
- max_tracer_type_len = len;
out:
tracing_selftest_running = false;
@@ -709,7 +703,7 @@ __acquires(kernel_lock)
if (ret || !default_bootup_tracer)
goto out_unlock;
- if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE))
+ if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
goto out_unlock;
printk(KERN_INFO "Starting tracer '%s'\n", type->name);
@@ -731,14 +725,13 @@ __acquires(kernel_lock)
void unregister_tracer(struct tracer *type)
{
struct tracer **t;
- int len;
mutex_lock(&trace_types_lock);
for (t = &trace_types; *t; t = &(*t)->next) {
if (*t == type)
goto found;
}
- pr_info("Trace %s not registered\n", type->name);
+ pr_info("Tracer %s not registered\n", type->name);
goto out;
found:
@@ -751,17 +744,7 @@ void unregister_tracer(struct tracer *type)
current_trace->stop(&global_trace);
current_trace = &nop_trace;
}
-
- if (strlen(type->name) != max_tracer_type_len)
- goto out;
-
- max_tracer_type_len = 0;
- for (t = &trace_types; *t; t = &(*t)->next) {
- len = strlen((*t)->name);
- if (len > max_tracer_type_len)
- max_tracer_type_len = len;
- }
- out:
+out:
mutex_unlock(&trace_types_lock);
}
@@ -1410,7 +1393,7 @@ int trace_array_vprintk(struct trace_array *tr,
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
- return trace_array_printk(&global_trace, ip, fmt, args);
+ return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
@@ -1966,7 +1949,7 @@ static int s_show(struct seq_file *m, void *v)
return 0;
}
-static struct seq_operations tracer_seq_ops = {
+static const struct seq_operations tracer_seq_ops = {
.start = s_start,
.next = s_next,
.stop = s_stop,
@@ -2001,11 +1984,9 @@ __tracing_open(struct inode *inode, struct file *file)
if (current_trace)
*iter->trace = *current_trace;
- if (!alloc_cpumask_var(&iter->started, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
goto fail;
- cpumask_clear(iter->started);
-
if (current_trace && current_trace->print_max)
iter->tr = &max_tr;
else
@@ -2180,7 +2161,7 @@ static int t_show(struct seq_file *m, void *v)
return 0;
}
-static struct seq_operations show_traces_seq_ops = {
+static const struct seq_operations show_traces_seq_ops = {
.start = t_start,
.next = t_next,
.stop = t_stop,
@@ -2610,7 +2591,7 @@ static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- char buf[max_tracer_type_len+2];
+ char buf[MAX_TRACER_SIZE+2];
int r;
mutex_lock(&trace_types_lock);
@@ -2760,15 +2741,15 @@ static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- char buf[max_tracer_type_len+1];
+ char buf[MAX_TRACER_SIZE+1];
int i;
size_t ret;
int err;
ret = cnt;
- if (cnt > max_tracer_type_len)
- cnt = max_tracer_type_len;
+ if (cnt > MAX_TRACER_SIZE)
+ cnt = MAX_TRACER_SIZE;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
@@ -4406,7 +4387,7 @@ __init static int tracer_alloc_buffers(void)
if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
goto out_free_buffer_mask;
- if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
goto out_free_tracing_cpumask;
/* To save memory, keep the ring buffer size to its minimum */
@@ -4417,7 +4398,6 @@ __init static int tracer_alloc_buffers(void)
cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
cpumask_copy(tracing_cpumask, cpu_all_mask);
- cpumask_clear(tracing_reader_cpumask);
/* TODO: make the number of buffers hot pluggable with CPUS */
global_trace.buffer = ring_buffer_alloc(ring_buf_size,
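
Note: the bounds change in trace_get_user() matters because the parser buffer is NUL-terminated after the read loop; allowing idx to reach parser->size would write the terminator one byte past the end of the buffer. A minimal userspace sketch of the same pattern (names and the buffer size are illustrative, not the kernel's):

	#include <ctype.h>
	#include <stdio.h>

	#define PARSER_SIZE 8	/* stand-in for parser->size */

	int main(void)
	{
		char buffer[PARSER_SIZE];
		const char *in = "nop";			/* token being parsed */
		size_t idx = 0;

		while (*in && !isspace((unsigned char)*in)) {
			if (idx < PARSER_SIZE - 1)	/* was: idx < PARSER_SIZE */
				buffer[idx++] = *in++;
			else
				return 1;		/* kernel returns -EINVAL */
		}
		buffer[idx] = '\0';			/* now always in bounds */
		printf("%s\n", buffer);
		return 0;
	}

The trace_vprintk() change fixes a va_list being passed to the variadic trace_array_printk(); forwarding to trace_array_vprintk(), which takes a va_list, matches the argument type. The zalloc_cpumask_var() conversions are equivalent cleanups: the allocation returns an already-zeroed mask, so the separate cpumask_clear() calls can be dropped.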