summaryrefslogtreecommitdiff
path: root/arch/tile/kernel/ftrace.c
blob: 0c0996175b1ed613cf6fe4d3e89cec85ea5c6a6f (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * TILE-Gx specific ftrace support
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/ftrace.h>
#include <asm/sections.h>

#include <arch/opcode.h>

#ifdef CONFIG_DYNAMIC_FTRACE

/*
 * Build the TILE-Gx nop bundle { fnop ; nop }: an fnop in the X0 slot
 * and a nop in the X1 slot.  This is the bundle patched into a call
 * site when tracing is disabled, costing a single cycle per function.
 */
static inline tilegx_bundle_bits NOP(void)
{
	return create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
		create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
		create_Opcode_X0(RRR_0_OPCODE_X0) |
		create_UnaryOpcodeExtension_X1(NOP_UNARY_OPCODE_X1) |
		create_RRROpcodeExtension_X1(UNARY_RRR_0_OPCODE_X1) |
		create_Opcode_X1(RRR_0_OPCODE_X1);
}

/*
 * Set while the ftrace core batch-modifies kernel text (presumably with
 * the machine stopped — NOTE(review): confirm the core calls prepare/
 * post_process around stop_machine).  While set, ftrace_modify_code()
 * skips per-site icache flushes in favor of the single whole-icache
 * flush done in ftrace_arch_code_modify_post_process().
 */
static int machine_stopped __read_mostly;

/* Called by the ftrace core before a batch of text patching begins. */
int ftrace_arch_code_modify_prepare(void)
{
	machine_stopped = 1;
	return 0;
}

/*
 * Called by the ftrace core after a batch of text patching completes.
 * Flush an address range the size of the L1 instruction cache in one
 * go (assumes this invalidates every icache line regardless of the
 * patched addresses — TODO confirm against the icache indexing), then
 * clear the flag so later single-site patches flush individually again.
 */
int ftrace_arch_code_modify_post_process(void)
{
	flush_icache_range(0, CHIP_L1I_CACHE_SIZE());
	machine_stopped = 0;
	return 0;
}

/*
 * Put { move r10, lr; jal ftrace_caller } in a bundle, this lets dynamic
 * tracer just add one cycle overhead to every kernel function when disabled.
 */
/*
 * Encode a single TILE-Gx bundle that branches from @pc to @addr.
 *
 * @pc:   address of the bundle being patched
 * @addr: branch target
 * @link: true  -> X1 slot holds "jal addr" (call, writes lr)
 *        false -> X1 slot holds "j addr"  (plain jump)
 *
 * The X0 slot is an fnop, except when the target is the ftrace
 * trampoline or ftrace_stub, where it saves lr into r10 (encoded as
 * "or r10, lr, zero") so the trampoline can see the caller's return
 * address.  Returns the full 64-bit bundle (X1 | X0 fields OR'd).
 */
static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
				       bool link)
{
	tilegx_bundle_bits opcode_x0, opcode_x1;
	/* Branch offsets are encoded in units of 8-byte bundles. */
	long pcrel_by_instr = (addr - pc) >> TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES;

	if (link) {
		/* opcode: jal addr */
		opcode_x1 =
			create_Opcode_X1(JUMP_OPCODE_X1) |
			create_JumpOpcodeExtension_X1(JAL_JUMP_OPCODE_X1) |
			create_JumpOff_X1(pcrel_by_instr);
	} else {
		/* opcode: j addr */
		opcode_x1 =
			create_Opcode_X1(JUMP_OPCODE_X1) |
			create_JumpOpcodeExtension_X1(J_JUMP_OPCODE_X1) |
			create_JumpOff_X1(pcrel_by_instr);
	}

	/*
	 * Also put { move r10, lr; jal ftrace_stub } in a bundle, which
	 * is used to replace the instruction in address ftrace_call.
	 */
	if (addr == FTRACE_ADDR || addr == (unsigned long)ftrace_stub) {
		/* opcode: or r10, lr, zero */
		opcode_x0 =
			create_Dest_X0(10) |
			create_SrcA_X0(TREG_LR) |
			create_SrcB_X0(TREG_ZERO) |
			create_RRROpcodeExtension_X0(OR_RRR_0_OPCODE_X0) |
			create_Opcode_X0(RRR_0_OPCODE_X0);
	} else {
		/* opcode: fnop */
		opcode_x0 =
			create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
			create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
			create_Opcode_X0(RRR_0_OPCODE_X0);
	}

	return opcode_x1 | opcode_x0;
}

/*
 * Replacement bundle for a disabled call site: a plain nop bundle.
 * @rec is unused; kept for signature symmetry with ftrace_call_replace().
 */
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return (unsigned long)NOP();
}

static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
	return ftrace_gen_branch(pc, addr, true);
}

/*
 * Patch the bundle at kernel-text address @pc to @new.
 *
 * @pc:  executable (read-only) address of the call site
 * @old: expected current bundle — NOTE(review): currently unused; the
 *       write is not verified against it before patching
 * @new: bundle to install
 *
 * Returns 0 on success, -EINVAL if @pc is not kernel/module text,
 * -EPERM if the write faults.
 */
static int ftrace_modify_code(unsigned long pc, unsigned long old,
			      unsigned long new)
{
	unsigned long pc_wr;

	/* Check if the address is in kernel text space and module space. */
	if (!kernel_text_address(pc))
		return -EINVAL;

	/*
	 * Operate on writable kernel text mapping: translate the read-only
	 * text address (MEM_SV_START based) to its writable alias in the
	 * direct map (PAGE_OFFSET based).
	 */
	pc_wr = pc - MEM_SV_START + PAGE_OFFSET;

	if (probe_kernel_write((void *)pc_wr, &new, MCOUNT_INSN_SIZE))
		return -EPERM;

	/* Make the store visible before any icache flush/refetch. */
	smp_wmb();

	/*
	 * During batch patching (machine_stopped) the post_process hook
	 * flushes the whole icache instead; uniprocessor needs no
	 * cross-CPU flush here either.
	 */
	if (!machine_stopped && num_online_cpus() > 1)
		flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);

	return 0;
}

/*
 * Point the patchable ftrace_call site at @func by installing a
 * "jal func" bundle over the current contents of ftrace_call.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)&ftrace_call;
	unsigned long old_insn;
	unsigned long new_insn;

	memcpy(&old_insn, &ftrace_call, MCOUNT_INSN_SIZE);
	new_insn = ftrace_call_replace(ip, (unsigned long)func);

	return ftrace_modify_code(ip, old_insn, new_insn);
}

/*
 * Enable tracing for one call site: replace the nop bundle at rec->ip
 * with a linking branch to @addr (the ftrace trampoline).
 * Returns 0 on success or a negative errno from ftrace_modify_code().
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned long old = ftrace_nop_replace(rec);
	unsigned long new = ftrace_call_replace(ip, addr);

	/* Use the cached ip consistently (was mixed with rec->ip). */
	return ftrace_modify_code(ip, old, new);
}

/*
 * Disable tracing for one call site: replace the branch-and-link to
 * @addr at rec->ip with a nop bundle.  @mod is unused on this arch.
 * Returns 0 on success or a negative errno from ftrace_modify_code().
 */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned long cur_insn = ftrace_call_replace(ip, addr);
	unsigned long nop_insn = ftrace_nop_replace(rec);

	return ftrace_modify_code(ip, cur_insn, nop_insn);
}

/* No arch-specific setup is needed for dynamic ftrace on TILE-Gx. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook a function's return for the function-graph tracer.
 *
 * @parent:        address of the saved return-address slot; rewritten to
 *                 point at return_to_handler so the return is intercepted
 * @self_addr:     address of the traced function itself
 * @frame_pointer: caller frame pointer, recorded for the return trace
 *
 * Does nothing if graph tracing is paused for this task.  The original
 * return address is pushed onto the per-task return stack first; if the
 * filter callback then declines the function, both the stack push and
 * the *parent rewrite are rolled back.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	struct ftrace_graph_ent trace;
	unsigned long old;
	int err;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/* Divert the return to the handler, remembering the real target. */
	old = *parent;
	*parent = return_hooker;

	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
				       frame_pointer);
	if (err == -EBUSY) {
		/* Return stack full: undo the diversion. */
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		/* Declined: pop the entry we pushed and undo the diversion. */
		current->curr_ret_stack--;
		*parent = old;
	}
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;

/*
 * Toggle a patchable call site between a nop bundle and a plain jump
 * to @func.  @enable selects the direction: nop -> branch when true,
 * branch -> nop when false.
 */
static int __ftrace_modify_caller(unsigned long *callsite,
				  void (*func) (void), bool enable)
{
	unsigned long pc = (unsigned long)callsite;
	unsigned long branch_insn = ftrace_gen_branch(pc, (unsigned long)func,
						      false);
	unsigned long nop_insn = NOP();

	if (enable)
		return ftrace_modify_code(pc, nop_insn, branch_insn);

	return ftrace_modify_code(pc, branch_insn, nop_insn);
}

/* Patch the ftrace_graph_call site to enable or disable graph tracing. */
static int ftrace_modify_graph_caller(bool enable)
{
	return __ftrace_modify_caller(&ftrace_graph_call,
				      ftrace_graph_caller, enable);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */