path: root/kernel/trace/trace_functions.c
blob: 3a5fa08cedb0a78caacd7ff3d96cf3333f4e04f0 (plain)
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

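/*
 * Saved by function_trace_init() so that the stack-trace callback
 * below can find the per-cpu trace data of the current trace array.
 */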
static struct trace_array	*func_trace;

static void start_function_trace(struct trace_array *tr)
{
	tr->cpu = get_cpu();
	tracing_reset_online_cpus(tr);
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
}

static void stop_function_trace(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;
	start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	stop_function_trace(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}

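/*
 * ftrace callback used while the func_stack_trace option is set:
 * records a stack trace for each traced function call.  The per-cpu
 * "disabled" counter acts as a recursion guard; the stack trace is
 * recorded only on the first (non-nested) entry on this CPU.
 */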
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

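	/*
	 * ftrace_function_enabled is not defined in this file; it is
	 * shared state maintained by the core tracing code.
	 */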
	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw_smp_processor_id(), since this callback can
	 * be invoked before the recursion protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		/*
		 * Skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call,
		 *    ftrace_list_func,
		 *    ftrace_call
		 */
		__trace_stack(tr, data, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

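/*
 * Registered/unregistered by func_set_flag() below as the
 * func_stack_trace option is toggled.
 */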
static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
};

/* Our options (only one so far) */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

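/*
 * Called when one of this tracer's options is flipped through the
 * trace options interface; only the stack-trace bit is known here.
 */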
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_FUNC_OPT_STACK) {
		/* do nothing if the flag already matches the requested state */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			return 0;

		if (set)
			register_ftrace_function(&trace_stack_ops);
		else
			unregister_ftrace_function(&trace_stack_ops);

		return 0;
	}

	return -EINVAL;
}

static struct tracer function_trace __read_mostly =
{
	.name	     = "function",
	.init	     = function_trace_init,
	.reset	     = function_trace_reset,
	.start	     = function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_function,
#endif
};

static __init int init_function_trace(void)
{
	return register_tracer(&function_trace);
}

device_initcall(init_function_trace);
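
/*
 * Usage sketch (not part of the build): with this tracer compiled in,
 * it is driven from the tracing debugfs directory.  Assuming debugfs
 * is mounted at /sys/kernel/debug, as is conventional:
 *
 *	echo function > /sys/kernel/debug/tracing/current_tracer
 *	echo func_stack_trace > /sys/kernel/debug/tracing/trace_options
 *	cat /sys/kernel/debug/tracing/trace
 *
 * The func_stack_trace flag corresponds to TRACER_OPT(func_stack_trace,
 * TRACE_FUNC_OPT_STACK) above and is handled by func_set_flag().
 */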