/*
 * arch/alpha/lib/ev6-memchr.S
 *
 * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com>
 *
 * Finds characters in a memory area.  Optimized for the Alpha:
 *
 *    - memory accessed as aligned quadwords only
 *    - uses cmpbge to compare 8 bytes in parallel
 *    - does binary search to find 0 byte in last
 *      quadword (HAKMEM needed 12 instructions to
 *      do this instead of the 9 instructions that
 *      binary search needs).
 *
 * For correctness consider that:
 *
 *    - only the minimum number of quadwords may be accessed
 *    - the third argument is an unsigned long
 *
 * Much of the information about 21264 scheduling/coding comes from:
 *	Compiler Writer's Guide for the Alpha 21264
 *	abbreviated as 'CWG' in other comments here
 *	ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
 * Scheduling notation:
 *	E	- either cluster
 *	U	- upper subcluster; U0 - subcluster U0; U1 - subcluster U1
 *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1
 * Try not to change the actual algorithm if possible for consistency.
 */
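
/*
 * Illustrative C sketch of the quadword-at-a-time idea used below.
 * The names here are placeholders and the zero-byte bit test stands in
 * for cmpbge; the real routine additionally handles the unaligned head
 * and tail with ldq_u/extql/extqh and masking, as the code shows.
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	void *memchr_sketch(const void *s, int c, size_t n)
 *	{
 *		const unsigned char *p = s;
 *		uint64_t pat = (unsigned char)c;
 *
 *		pat |= pat << 8;		// chch
 *		pat |= pat << 16;		// chchchch
 *		pat |= pat << 32;		// chchchchchchchch
 *
 *		while (n >= 8) {		// one quadword per pass
 *			uint64_t q;
 *			memcpy(&q, p, 8);
 *			q ^= pat;		// matching bytes become 0
 *			if ((q - 0x0101010101010101ULL) & ~q &
 *			    0x8080808080808080ULL)
 *				break;		// match lies in this quadword
 *			p += 8;
 *			n -= 8;
 *		}
 *		for (; n; p++, n--)		// finish byte by byte
 *			if (*p == (unsigned char)c)
 *				return (void *)p;
 *		return NULL;
 *	}
 */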

        .set noreorder
        .set noat

	.align	4
	.globl memchr
	.ent memchr
memchr:
	.frame $30,0,$26,0
	.prologue 0

	# Hack -- if someone passes in (size_t)-1, hoping to just
	# search until the end of the address space, we will overflow
	# below when we find the address of the last byte.  Given
	# that we will never have a 56-bit address space, cropping
	# the length is the easiest way to avoid trouble.
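	# E.g. $18 = 0xffffffffffffffff: zapping byte 7 below leaves
	# $5 = 0x00ffffffffffffff, still larger than any real object.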
	zap	$18, 0x80, $5	# U : Bound length
	beq	$18, $not_found	# U :
        ldq_u   $1, 0($16)	# L : load first quadword Latency=3
	and	$17, 0xff, $17	# E : L L U U : 00000000000000ch

	insbl	$17, 1, $2	# U : 000000000000ch00
	cmpult	$18, 9, $4	# E : small (< 1 quad) string?
	or	$2, $17, $17	# E : 000000000000chch
        lda     $3, -1($31)	# E : U L L U

	sll	$17, 16, $2	# U : 00000000chch0000
	addq	$16, $5, $5	# E : Max search address
	or	$2, $17, $17	# E : 00000000chchchch
	sll	$17, 32, $2	# U : U L L U : chchchch00000000

	or	$2, $17, $17	# E : chchchchchchchch
	extql	$1, $16, $7	# U : $7 is upper bits
	beq	$4, $first_quad	# U :
	ldq_u	$6, -1($5)	# L : L U U L : eight or fewer bytes to search Latency=3

	extqh	$6, $16, $6	# U : 2 cycle stall for $6
	mov	$16, $0		# E :
	nop			# E :
	or	$7, $6, $1	# E : L U L U $1 = quadword starting at $16

	# Deal with the case where at most 8 bytes remain to be searched
	# in $1.  E.g.:
	#	$18 = 6
	#	$1 = ????c6c5c4c3c2c1
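	# For that example, negq/srl below leave $6 = 0x3f, so any
	# cmpbge hits in the don't-care ???? bytes are masked off.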
$last_quad:
	negq	$18, $6		# E :
        xor	$17, $1, $1	# E :
	srl	$3, $6, $6	# U : $6 = mask of $18 bits set
        cmpbge  $31, $1, $2	# E : L U L U

	nop
	nop
	and	$2, $6, $2	# E :
        beq     $2, $not_found	# U : U L U L

$found_it:
#ifdef CONFIG_ALPHA_EV67
	/*
	 * Since we are guaranteed to have set one of the bits, we don't
	 * have to worry about coming back with a 0x40 out of cttz...
	 */
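	# E.g. $2 = 0x28 (bytes 3 and 5 matched): cttz returns 3 and
	# $0 advances to the first matching byte.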
	cttz	$2, $3		# U0 :
	addq	$0, $3, $0	# E : All done
	nop			# E :
	ret			# L0 : L U L U
#else
	/*
	 * Slow and clunky.  It can probably be improved.
	 * An exercise left for others.
	 */
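	# negq/and isolate the lowest set bit of $2; the 0x0f, 0x33 and
	# 0x55 tests then binary-search its position, conditionally
	# adding 4, 2 and 1 to $0.  E.g. lowest bit 0x08 (byte 3): the
	# 0x0f test adds nothing, the 0x33 and 0x55 tests add 2 and 1,
	# so a total of 3 is added to $0.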
        negq    $2, $3		# E :
        and     $2, $3, $2	# E :
        and     $2, 0x0f, $1	# E :
        addq    $0, 4, $3	# E :

        cmoveq  $1, $3, $0	# E : Latency 2, extra map cycle
	nop			# E : keep with cmov
        and     $2, 0x33, $1	# E :
        addq    $0, 2, $3	# E : U L U L : 2 cycle stall on $0

        cmoveq  $1, $3, $0	# E : Latency 2, extra map cycle
	nop			# E : keep with cmov
        and     $2, 0x55, $1	# E :
        addq    $0, 1, $3	# E : U L U L : 2 cycle stall on $0

        cmoveq  $1, $3, $0	# E : Latency 2, extra map cycle
	nop
	nop
	ret			# L0 : L U L U
#endif

	# Deal with the case where $18 > 8 bytes remain to be
	# searched.  $16 may not be aligned.
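	# The insqh below builds a mask of 0xff bytes covering the part
	# of the first aligned quadword that precedes the buffer (e.g.
	# $16 & 7 = 2 gives $2 = 0x000000000000ffff); or-ing it in after
	# the xor keeps those bytes from ever looking like matches.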
	.align 4
$first_quad:
	andnot	$16, 0x7, $0	# E :
        insqh   $3, $16, $2	# U : $2 = 0000ffffffffffff ($16<0:2> ff)
        xor	$1, $17, $1	# E :
	or	$1, $2, $1	# E : U L U L $1 = ====ffffffffffff

        cmpbge  $31, $1, $2	# E :
        bne     $2, $found_it	# U :
	# At least one byte left to process.
	ldq	$1, 8($0)	# L :
	subq	$5, 1, $18	# E : U L U L

	addq	$0, 8, $0	# E :
	# Make $18 point to last quad to be accessed (the
	# last quad may or may not be partial).
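	# E.g. end pointer $5 = 0x1003a: $18 is 0x10039 from above and
	# the andnot leaves 0x10038, the aligned address of the final
	# (possibly partial) quadword.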
	andnot	$18, 0x7, $18	# E :
	cmpult	$0, $18, $2	# E :
	beq	$2, $final	# U : U L U L

	# At least two quads remain to be accessed.

	subq	$18, $0, $4	# E : $4 <- nr quads to be processed
	and	$4, 8, $4	# E : odd number of quads?
	bne	$4, $odd_quad_count # U :
	# At least three quads remain to be accessed
	mov	$1, $4		# E : L U L U : move prefetched value to correct reg
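	# Each trip through $unrolled_loop plus $odd_quad_count below
	# examines two quadwords, loading the next one while testing the
	# current one; an odd quad count enters at $odd_quad_count so
	# the pairing works out.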

	.align	4
$unrolled_loop:
	ldq	$1, 8($0)	# L : prefetch $1
	xor	$17, $4, $2	# E :
	cmpbge	$31, $2, $2	# E :
	bne	$2, $found_it	# U : U L U L

	addq	$0, 8, $0	# E :
	nop			# E :
	nop			# E :
	nop			# E :

$odd_quad_count:
	xor	$17, $1, $2	# E :
	ldq	$4, 8($0)	# L : prefetch $4
	cmpbge	$31, $2, $2	# E :
	addq	$0, 8, $6	# E :

	bne	$2, $found_it	# U :
	cmpult	$6, $18, $6	# E :
	addq	$0, 8, $0	# E :
	nop			# E :

	bne	$6, $unrolled_loop # U :
	mov	$4, $1		# E : move prefetched value into $1
	nop			# E :
	nop			# E :

$final:	subq	$5, $0, $18	# E : $18 <- number of bytes left to do
	nop			# E :
	nop			# E :
	bne	$18, $last_quad	# U :

$not_found:
	mov	$31, $0		# E :
	nop			# E :
	nop			# E :
	ret			# L0 :

        .end memchr