path: root/arch/powerpc/kernel/vector.S
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* void do_load_up_transact_altivec(struct thread_struct *thread)
 *
 * This is similar to load_up_altivec but for the transactional version of the
 * vector regs.  It doesn't mess with the task MSR or valid flags.
 * Furthermore, VEC laziness is not supported with TM currently.
 */
_GLOBAL(do_load_up_transact_altivec)
	mfmsr	r6
	oris	r5,r6,MSR_VEC@h
	MTMSRD(r5)
	isync

	li	r4,1
	stw	r4,THREAD_USED_VR(r3)

	li	r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR
	lvx	vr0,r10,r3
	mtvscr	vr0
	addi	r10,r3,THREAD_TRANSACT_VRSTATE
	REST_32VRS(0,r4,r10)

	/* Disable VEC again. */
	MTMSRD(r6)
	isync

	blr
#endif

/*
 * Load state from memory into VMX registers including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(load_vr_state)
	li	r4,VRSTATE_VSCR
	lvx	vr0,r4,r3
	mtvscr	vr0
	REST_32VRS(0,r4,r3)
	blr

/*
 * Store VMX state into memory, including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(store_vr_state)
	SAVE_32VRS(0, r4, r3)
	mfvscr	vr0
	li	r4, VRSTATE_VSCR
	stvx	vr0, r4, r3
	blr
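
/*
 * For reference, a rough C-level sketch of the two helpers above, assuming
 * the thread_vr_state layout implied by VRSTATE_VSCR (set_vr/get_vr and
 * set_vscr/get_vscr are purely illustrative stand-ins, not kernel functions):
 *
 *	void load_vr_state(struct thread_vr_state *v)
 *	{
 *		int i;
 *
 *		set_vscr(v->vscr);		// lvx + mtvscr
 *		for (i = 0; i < 32; i++)
 *			set_vr(i, v->vr[i]);	// REST_32VRS
 *	}
 *
 *	void store_vr_state(struct thread_vr_state *v)
 *	{
 *		int i;
 *
 *		for (i = 0; i < 32; i++)
 *			v->vr[i] = get_vr(i);	// SAVE_32VRS
 *		v->vscr = get_vscr();		// mfvscr + stvx
 *	}
 */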

/*
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (i.e., no lazy save of the vector registers).
 *
 * Note that on 32-bit this can only use registers that will be
 * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
 */
_GLOBAL(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync

/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 * VRSAVE isn't dealt with here; that is done in the normal context
 * switch code.  Note that we could eventually rely on the VRSAVE value
 * to avoid saving all of the VREGs here...
 */
#ifndef CONFIG_SMP
	LOAD_REG_ADDRBASE(r3, last_task_used_altivec)
	toreal(r3)
	PPC_LL	r4,ADDROFF(last_task_used_altivec)(r3)
	PPC_LCMPI	0,r4,0
	beq	1f

	/* Save VMX state to last_task_used_altivec's THREAD struct */
	toreal(r4)
	addi	r4,r4,THREAD
	addi	r6,r4,THREAD_VRSTATE
	SAVE_32VRS(0,r5,r6)
	mfvscr	vr0
	li	r10,VRSTATE_VSCR
	stvx	vr0,r10,r6
	/* Disable VMX for last_task_used_altivec */
	PPC_LL	r5,PT_REGS(r4)
	toreal(r5)
	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r10,MSR_VEC@h
	andc	r4,r4,r10
	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */

	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpwi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
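	/*
	 * Note: r9 (32-bit) and r12 (64-bit) are assumed to hold the MSR
	 * value saved by the exception prologue, which the exception return
	 * path restores; setting MSR_VEC in it is what hands VMX back to
	 * the task.
	 */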
#ifdef CONFIG_PPC32
	mfspr	r5,SPRN_SPRG_THREAD		/* current task's THREAD (phys) */
	oris	r9,r9,MSR_VEC@h
#else
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
#endif
	addi	r6,r5,THREAD_VRSTATE
	li	r4,1
	li	r10,VRSTATE_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r6
	mtvscr	vr0
	REST_32VRS(0,r4,r6)
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	fromreal(r4)
	PPC_STL	r4,ADDROFF(last_task_used_altivec)(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	blr

_GLOBAL(giveup_altivec_notask)
	mfmsr	r3
	andis.	r4,r3,MSR_VEC@h
	bnelr				/* Already enabled? */
	oris	r3,r3,MSR_VEC@h
	SYNC
	MTMSRD(r3)			/* enable use of VMX now */
	isync
	blr

/*
 * giveup_altivec(tsk)
 * Disable VMX for the task given as the argument,
 * and save the vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 */
_GLOBAL(giveup_altivec)
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	SYNC
	MTMSRD(r5)			/* enable use of VMX now */
	isync
	PPC_LCMPI	0,r3,0
	beqlr				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	PPC_LL	r7,THREAD_VRSAVEAREA(r3)
	PPC_LL	r5,PT_REGS(r3)
	PPC_LCMPI	0,r7,0
	bne	2f
	addi	r7,r3,THREAD_VRSTATE
2:	PPC_LCMPI	0,r5,0
	SAVE_32VRS(0,r4,r7)
	mfvscr	vr0
	li	r4,VRSTATE_VSCR
	stvx	vr0,r4,r7
	beq	1f
	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	lis	r3,(MSR_VEC|MSR_VSX)@h
FTR_SECTION_ELSE
	lis	r3,MSR_VEC@h
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#else
	lis	r3,MSR_VEC@h
#endif
	andc	r4,r4,r3		/* disable VMX for previous task */
	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	LOAD_REG_ADDRBASE(r4,last_task_used_altivec)
	PPC_STL	r5,ADDROFF(last_task_used_altivec)(r4)
#endif /* CONFIG_SMP */
	blr
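
/*
 * Rough outline of giveup_altivec above: enable MSR_VEC for the kernel,
 * then, if tsk is non-NULL, store the 32 VRs and the VSCR into the task's
 * vr_save_area if one is set (otherwise into its vr_state), and, if the
 * task has a saved register frame, clear MSR_VEC (plus MSR_VSX on
 * VSX-capable CPUs) in its regs->msr so a later return to the task faults
 * the vector unit back in.  On UP, last_task_used_altivec is cleared as
 * well.  (Descriptive summary only; the field names follow the
 * THREAD_VRSAVEAREA/THREAD_VRSTATE offsets.)
 */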

#ifdef CONFIG_VSX

#ifdef CONFIG_PPC32
#error This asm code isn't ready for 32-bit kernels
#endif

/*
 * load_up_vsx(unused, unused, tsk)
 * Disable VSX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Reuse the FP and AltiVec load paths, but first check whether those
 * register sets have already been loaded.
 */
_GLOBAL(load_up_vsx)
/* Load the FP and VMX registers if they haven't been loaded yet */
	andi.	r5,r12,MSR_FP
	beql+	load_up_fpu		/* skip if already loaded */
	andis.	r5,r12,MSR_VEC@h
	beql+	load_up_altivec		/* skip if already loaded */

#ifndef CONFIG_SMP
	ld	r3,last_task_used_vsx@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Disable VSX for last_task_used_vsx */
	addi	r4,r4,THREAD
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VSX@h
	andc	r6,r4,r6
	std	r6,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	ld	r4,PACACURRENT(r13)
	addi	r4,r4,THREAD		/* Get THREAD */
	li	r6,1
	stw	r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
	/* enable use of VSX after return */
	oris	r12,r12,MSR_VSX@h
	std	r12,_MSR(r1)
#ifndef CONFIG_SMP
	/* Update last_task_used_vsx to 'current' */
	ld	r4,PACACURRENT(r13)
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	b	fast_exception_return

/*
 * __giveup_vsx(tsk)
 * Disable VSX for the task given as the argument.
 * Does NOT save vsx registers.
 * Enables the VSX for use in the kernel on return.
 */
_GLOBAL(__giveup_vsx)
	mfmsr	r5
	oris	r5,r5,MSR_VSX@h
	mtmsrd	r5			/* enable use of VSX now */
	isync

	cmpdi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpdi	0,r5,0
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VSX@h
	andc	r4,r4,r3		/* disable VSX for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	ld	r4,last_task_used_vsx@got(r2)
	std	r5,0(r4)
#endif /* CONFIG_SMP */
	blr

#endif /* CONFIG_VSX */


/*
 * The routines below are in assembler so we can closely control the
 * usage of floating-point registers.  These routines must be called
 * with preempt disabled.
 */
#ifdef CONFIG_PPC32
	.data
fpzero:
	.long	0
fpone:
	.long	0x3f800000	/* 1.0 in single-precision FP */
fphalf:
	.long	0x3f000000	/* 0.5 in single-precision FP */

#define LDCONST(fr, name)	\
	lis	r11,name@ha;	\
	lfs	fr,name@l(r11)
#else

	.section ".toc","aw"
fpzero:
	.tc	FD_0_0[TC],0
fpone:
	.tc	FD_3ff00000_0[TC],0x3ff0000000000000	/* 1.0 */
fphalf:
	.tc	FD_3fe00000_0[TC],0x3fe0000000000000	/* 0.5 */

#define LDCONST(fr, name)	\
	lfd	fr,name@toc(r2)
#endif

	.text
/*
 * Internal routine to enable floating point and set FPSCR to 0.
 * Don't call it from C; it doesn't use the normal calling convention.
 */
fpenable:
#ifdef CONFIG_PPC32
	stwu	r1,-64(r1)
#else
	stdu	r1,-64(r1)
#endif
	mfmsr	r10
	ori	r11,r10,MSR_FP
	mtmsr	r11
	isync
	stfd	fr0,24(r1)
	stfd	fr1,16(r1)
	stfd	fr31,8(r1)
	LDCONST(fr1, fpzero)
	mffs	fr31
	MTFSF_L(fr1)
	blr

fpdisable:
	mtlr	r12
	MTFSF_L(fr31)
	lfd	fr31,8(r1)
	lfd	fr1,16(r1)
	lfd	fr0,24(r1)
	mtmsr	r10
	isync
	addi	r1,r1,64
	blr
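
/*
 * Calling convention for the helpers above (as used by the routines below):
 * the caller saves LR in r12 with mflr and branches to fpenable, which
 * creates a 64-byte stack frame, saves fr0/fr1/fr31 and the caller's FPSCR
 * (kept in fr31), and clears the FPSCR.  Callers may use 32(r1)..56(r1) to
 * save additional FPRs (fr2-fr5).  They finish by branching to fpdisable,
 * which restores the FPSCR and the saved FPRs, restores the MSR from r10,
 * pops the frame and returns through the LR value held in r12.
 */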

/*
 * Vector add, floating point.
 */
_GLOBAL(vaddfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fadds	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable
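
/*
 * Illustrative C equivalent of vaddfp above (a sketch only; the routine
 * processes the four 32-bit lanes of a 128-bit vector with scalar FP
 * instructions, with the FPSCR cleared by fpenable):
 *
 *	void vaddfp(float d[4], const float a[4], const float b[4])
 *	{
 *		int i;
 *
 *		for (i = 0; i < 4; i++)
 *			d[i] = a[i] + b[i];
 *	}
 *
 * vsubfp, vmaddfp and vnmsubfp below follow the same per-lane pattern,
 * using fsubs, fmadds and fnmsubs respectively.
 */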

/*
 * Vector subtract, floating point.
 */
_GLOBAL(vsubfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fsubs	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector multiply and add, floating point.
 */
_GLOBAL(vmaddfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fmadds	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable

/*
 * Vector negative multiply and subtract, floating point.
 */
_GLOBAL(vnmsubfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fnmsubs	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable

/*
 * Vector reciprocal estimate.  We just compute 1.0/x.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrefp)
	mflr	r12
	bl	fpenable
	li	r0,4
	LDCONST(fr1, fpone)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	fdivs	fr0,fr1,fr0
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector reciprocal square-root estimate, floating point.
 * We use the frsqrte instruction for the initial estimate followed
 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrsqrtefp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	stfd	fr3,40(r1)
	stfd	fr4,48(r1)
	stfd	fr5,56(r1)
	li	r0,4
	LDCONST(fr4, fpone)
	LDCONST(fr5, fphalf)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	frsqrte	fr1,fr0		/* r = frsqrte(s) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	stfsx	fr1,r3,r6
	addi	r6,r6,4
	bdnz	1b
	lfd	fr5,56(r1)
	lfd	fr4,48(r1)
	lfd	fr3,40(r1)
	lfd	fr2,32(r1)
	b	fpdisable
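
/*
 * The Newton-Raphson step used twice in vrsqrtefp refines an estimate r
 * of 1/sqrt(s):
 *
 *	r' = r + 0.5 * r * (1 - s * r * r)
 *
 * Illustrative per-lane C sketch (frsqrte_estimate() is a hypothetical
 * stand-in for the hardware frsqrte result, not a kernel function):
 *
 *	float rsqrte_lane(float s)
 *	{
 *		float r = frsqrte_estimate(s);
 *
 *		r = r + 0.5f * r * (1.0f - s * r * r);
 *		r = r + 0.5f * r * (1.0f - s * r * r);
 *		return r;
 *	}
 */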