/*
 * Copyright (c) 2013 ARM Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/cpu-features.h>
#include <machine/asm.h>

#ifdef __ARMEB__
#define S2LOMEM lsl
#define S2LOMEMEQ lsleq
#define S2HIMEM lsr
#define MSB 0x000000ff
#define LSB 0xff000000
#define BYTE0_OFFSET 24
#define BYTE1_OFFSET 16
#define BYTE2_OFFSET 8
#define BYTE3_OFFSET 0
#else /* not __ARMEB__ */
#define S2LOMEM lsr
#define S2LOMEMEQ lsreq
#define S2HIMEM lsl
#define BYTE0_OFFSET 0
#define BYTE1_OFFSET 8
#define BYTE2_OFFSET 16
#define BYTE3_OFFSET 24
#define MSB 0xff000000
#define LSB 0x000000ff
#endif /* not __ARMEB__ */

.syntax unified

#if defined (__thumb__)
        .thumb
        .thumb_func
#endif

ENTRY(strcmp)
        /* Use LDRD whenever possible. */

/* The main thing to look out for when comparing large blocks is that
   the loads do not cross a page boundary when loading past the index
   of the byte with the first difference or the first string terminator.

   For example, if the strings are identical and the string terminator
   is at index k, byte-by-byte comparison will not load beyond addresses
   s1+k and s2+k; word-by-word comparison may load up to 3 bytes beyond
   k, and double-word comparison up to 7 bytes. If the load of these bytes
   crosses a page boundary, it might cause a memory fault (if the page is
   not mapped) that would not have happened in byte-by-byte comparison.

   If an address is (double-)word aligned, then a load of a (double) word
   from that address will not cross a page boundary.
   Therefore, the algorithm below considers word and double-word alignment
   of the strings separately. */

/* High-level description of the algorithm.

   * The fast path: if both strings are double-word aligned,
     use LDRD to load two words from each string in every loop iteration.
   * If the strings have the same offset from a word boundary,
     use LDRB to load and compare byte by byte until
     the first string is aligned to a word boundary (at most 3 bytes).
     This is optimized for quick return on short unaligned strings.
   * If the strings have the same offset from a double-word boundary,
     use LDRD to load two words from each string in every loop iteration,
     as in the fast path.
   * If the strings do not have the same offset from a double-word boundary,
     load a word from the second string before the loop to initialize the
     queue. Use LDRD to load two words from each string in every loop
     iteration. Inside the loop, load the second word from the second string
     only after comparing the first word, using the queued value, to
     guarantee safety across page boundaries.
   * If the strings do not have the same offset from a word boundary,
     use LDR and a shift queue. The order of loads and comparisons matters,
     as in the previous case.

   * Use UADD8 and SEL to detect zero bytes in a loaded word, and REV and
     CLZ to compute the return value.
   * The only difference between ARM and Thumb modes is the use of the CBZ
     instruction.
   * The only difference between big- and little-endian is the use of REV
     (instead of MOV) in little-endian to compute the return value.
 */
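
/* For reference, the function implements the usual strcmp contract; a
   minimal C sketch of the behavior (illustration only, not the code that
   is assembled here):

       int strcmp(const char *s1, const char *s2) {
           while (*s1 != '\0' && *s1 == *s2) {
               s1++;
               s2++;
           }
           return *(const unsigned char *)s1 - *(const unsigned char *)s2;
       }
 */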

.macro m_cbz reg label
#ifdef __thumb2__
        cbz     \reg, \label
#else /* not defined __thumb2__ */
        cmp     \reg, #0
        beq     \label
#endif /* not defined __thumb2__ */
.endm /* m_cbz */

.macro m_cbnz reg label
#ifdef __thumb2__
        cbnz    \reg, \label
#else /* not defined __thumb2__ */
        cmp     \reg, #0
        bne     \label
#endif /* not defined __thumb2__ */
.endm /* m_cbnz */
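
/* Note: CBZ/CBNZ exist only in Thumb-2 and encode forward branches only,
   which is why the macros above fall back to CMP plus a conditional
   branch when not assembling for Thumb-2. */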

.macro init
        /* Macro to save temporary registers and prepare magic values. */
        subs    sp, sp, #16
        .cfi_def_cfa_offset 16
        strd    r4, r5, [sp, #8]
        .cfi_rel_offset r4, 8
        .cfi_rel_offset r5, 12
        strd    r6, r7, [sp]
        .cfi_rel_offset r6, 0
        .cfi_rel_offset r7, 4
        mvn     r6, #0  /* all F */
        mov     r7, #0  /* all 0 */
.endm /* init */

.macro magic_compare_and_branch w1 w2 label
        /* Macro to compare registers w1 and w2 and conditionally branch to label. */
        cmp     \w1, \w2        /* Are w1 and w2 the same? */
        magic_find_zero_bytes \w1
        it      eq
        cmpeq   ip, #0          /* Is there a zero byte in w1? */
        bne     \label
.endm /* magic_compare_and_branch */

.macro magic_find_zero_bytes w1
        /* Macro to find all-zero bytes in w1, result is in ip. */
        uadd8   ip, \w1, r6
        sel     ip, r7, r6
.endm /* magic_find_zero_bytes */
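
/* A worked example of the zero-byte test above (values are hypothetical,
   for illustration only). With the magic values r6 = 0xffffffff, r7 = 0:

       uadd8   ip, w1, r6      @ per-byte add of 0xff: the per-byte carry
                               @ (GE flag) is set iff that byte is non-zero
       sel     ip, r7, r6      @ per byte: GE set -> 0x00, GE clear -> 0xff

   For w1 = 0x12340078 this yields ip = 0x0000ff00: each 0xff byte of ip
   marks a zero byte of w1, and ip == 0 iff w1 has no string terminator. */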

.macro setup_return w1 w2
#ifdef __ARMEB__
        mov     r1, \w1
        mov     r2, \w2
#else /* not __ARMEB__ */
        rev     r1, \w1
        rev     r2, \w2
#endif /* not __ARMEB__ */
.endm /* setup_return */
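
/* After setup_return, the byte at the lowest address sits in the most
   significant byte of r1/r2 (REV supplies the byte reversal needed on
   little-endian; a big-endian word is already in memory order), so one
   unsigned compare of the words orders them exactly as a byte-by-byte
   strcmp would. */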

        .cfi_startproc
        pld     [r0, #0]
        pld     [r1, #0]

        /* Are both strings double-word aligned? */
        orr     ip, r0, r1
        tst     ip, #7
        bne     .L_do_align

        /* Fast path. */
        .save   {r4-r7}
        init

.L_doubleword_aligned:

        /* Get here when the strings to compare are double-word aligned. */
        /* Compare two words in every iteration. */
        .p2align 2
2:
        pld     [r0, #16]
        pld     [r1, #16]

        /* Load the next double-word from each string. */
        ldrd    r2, r3, [r0], #8
        ldrd    r4, r5, [r1], #8

        magic_compare_and_branch w1=r2, w2=r4, label=.L_return_24
        magic_compare_and_branch w1=r3, w2=r5, label=.L_return_35
        b       2b

.L_do_align:
        /* Is the first string word-aligned? */
        ands    ip, r0, #3
        beq     .L_word_aligned_r0

        /* Fast compare byte by byte until the first string is word-aligned. */
        /* The offset of r0 from a word boundary is in ip. Thus, the number of
           bytes to read until the next word boundary is 4-ip. */
        bic     r0, r0, #3
        ldr     r2, [r0], #4
        lsls    ip, ip, #31
        beq     .L_byte2
        bcs     .L_byte3
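
/* An explanatory aside on the dispatch above: lsls ip, ip, #31 moves bit 1
   of the offset into the carry flag and leaves the zero flag set iff bit 0
   is clear, so offset 2 (Z set) enters at .L_byte2, offset 3 (C set)
   enters at .L_byte3, and offset 1 falls through to compare three bytes. */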

.L_byte1:
        ldrb    ip, [r1], #1
        uxtb    r3, r2, ror #BYTE1_OFFSET
        subs    ip, r3, ip
        bne     .L_fast_return
        m_cbz   reg=r3, label=.L_fast_return

.L_byte2:
        ldrb    ip, [r1], #1
        uxtb    r3, r2, ror #BYTE2_OFFSET
        subs    ip, r3, ip
        bne     .L_fast_return
        m_cbz   reg=r3, label=.L_fast_return

.L_byte3:
        ldrb    ip, [r1], #1
        uxtb    r3, r2, ror #BYTE3_OFFSET
        subs    ip, r3, ip
        bne     .L_fast_return
        m_cbnz  reg=r3, label=.L_word_aligned_r0

.L_fast_return:
        mov     r0, ip
        bx      lr

.L_word_aligned_r0:
        init
        /* The first string is word-aligned. */
        /* Is the second string word-aligned? */
        ands    ip, r1, #3
        bne     .L_strcmp_unaligned

.L_word_aligned:
        /* The strings are word-aligned. */
        /* Is the first string double-word aligned? */
        tst     r0, #4
        beq     .L_doubleword_aligned_r0

        /* If r0 is not double-word aligned yet, align it by loading
           and comparing the next word from each string. */
        ldr     r2, [r0], #4
        ldr     r4, [r1], #4
        magic_compare_and_branch w1=r2 w2=r4 label=.L_return_24

.L_doubleword_aligned_r0:
        /* Get here when r0 is double-word aligned. */
        /* Is r1 double-word aligned? */
        tst     r1, #4
        beq     .L_doubleword_aligned

        /* Get here when the strings to compare are word-aligned,
           r0 is double-word aligned, but r1 is not double-word aligned. */

        /* Initialize the queue. */
        ldr     r5, [r1], #4

        /* Compare two words in every iteration. */
        .p2align 2
3:
        pld     [r0, #16]
        pld     [r1, #16]

        /* Load the next double-word from each string and compare. */
        ldrd    r2, r3, [r0], #8
        magic_compare_and_branch w1=r2 w2=r5 label=.L_return_25
        ldrd    r4, r5, [r1], #8
        magic_compare_and_branch w1=r3 w2=r4 label=.L_return_34
        b       3b
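
/* Why the queue keeps this loop page-safe (explanatory note): r5 always
   holds the word of the second string that pairs with r2, loaded on an
   earlier iteration. The ldrd of r4/r5 is issued only after r2 has been
   checked against the queued r5, so if that pair contained the terminator
   the loop exits before loading past it from either string. */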

.macro miscmp_word offsetlo offsethi
        /* Macro to compare misaligned strings. */
        /* r0, r1 are word-aligned, and at least one of the strings
           is not double-word aligned. */
        /* Compare one word in every loop iteration. */
        /* OFFSETLO is the original bit-offset of r1 from a word boundary,
           OFFSETHI is 32 - OFFSETLO (i.e., the offset from the next word). */

        /* Initialize the shift queue. */
        ldr     r5, [r1], #4

        /* Compare one word from each string in every loop iteration. */
        .p2align 2
7:
        ldr     r3, [r0], #4
        S2LOMEM r5, r5, #\offsetlo
        magic_find_zero_bytes w1=r3
        cmp     r7, ip, S2HIMEM #\offsetlo
        and     r2, r3, r6, S2LOMEM #\offsetlo
        it      eq
        cmpeq   r2, r5
        bne     .L_return_25
        ldr     r5, [r1], #4
        cmp     ip, #0
        eor     r3, r2, r3
        S2HIMEM r2, r5, #\offsethi
        it      eq
        cmpeq   r3, r2
        bne     .L_return_32
        b       7b
.endm /* miscmp_word */
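
/* Sketch of the shift queue for a hypothetical instantiation with
   offsetlo=8/offsethi=24 (this revision never instantiates the macro and
   instead routes such strings through the legacy code at
   .L_strcmp_unaligned below): r5 queues the last word loaded from the
   second string. Shifting it by 8 exposes its three remaining bytes,
   which are compared against the matching three bytes of r3; the fourth
   byte comes from the next word of the second string, shifted by 24, and
   is compared only after the first check has ruled out a terminator, so
   no load runs past the end of either string. */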

.L_return_32:
        setup_return w1=r3, w2=r2
        b       .L_do_return
.L_return_34:
        setup_return w1=r3, w2=r4
        b       .L_do_return
.L_return_25:
        setup_return w1=r2, w2=r5
        b       .L_do_return
.L_return_35:
        setup_return w1=r3, w2=r5
        b       .L_do_return
.L_return_24:
        setup_return w1=r2, w2=r4

.L_do_return:

#ifdef __ARMEB__
        mov     r0, ip
#else /* not __ARMEB__ */
        rev     r0, ip
#endif /* not __ARMEB__ */

        /* Restore temporaries early, before computing the return value. */
        ldrd    r6, r7, [sp]
        ldrd    r4, r5, [sp, #8]
        adds    sp, sp, #16
        .cfi_def_cfa_offset 0
        .cfi_restore r4
        .cfi_restore r5
        .cfi_restore r6
        .cfi_restore r7

        /* There is a zero byte or a differing byte in r1 and r2. */
        /* r0 contains a mask of all-zero bytes in r1. */
        /* Using r0 and not ip here because cbz requires a low register. */
        m_cbz   reg=r0, label=.L_compute_return_value
        clz     r0, r0
        /* r0 contains the number of bits to the left of the first all-zero byte in r1. */
        rsb     r0, r0, #24
        /* Here, r0 contains the number of bits to the right of the first all-zero byte in r1. */
        lsr     r1, r1, r0
        lsr     r2, r2, r0
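
/* Worked example (little-endian, hypothetical bytes): if the word from
   the first string held 'a', NUL, then two stray bytes, the reversed mask
   in r0 is 0x00ff0000, CLZ gives 8, and the RSB yields 16; shifting r1
   and r2 right by 16 drops the bytes past the terminator so they cannot
   influence the final comparison. */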

.L_compute_return_value:
        movs    r0, #1
        cmp     r1, r2
        /* The return value is computed as follows.
           If r1 > r2 then (C == 1 and Z == 0), LS does not hold, and r0 is #1 at return.
           If r1 < r2 then (C == 0 and Z == 0), and we execute SBC with carry_in = 0,
           which means r0 := r0 - r0 - 1, so r0 is #-1 at return.
           If r1 = r2 then (C == 1 and Z == 1), and we execute SBC with carry_in = 1,
           which means r0 := r0 - r0, so r0 is #0 at return.
           (C == 0 and Z == 1) cannot happen because the carry bit is "not borrow". */
        it      ls
        sbcls   r0, r0, r0
        bx      lr

/* The code from the previous version of strcmp.S handles all of the
 * cases where the first string and second string cannot both be
 * aligned to a word boundary, and does so faster than the new algorithm.
 * See bionic/libc/arch-arm/cortex-a15/bionic/strcmp.S for the unedited
 * version of the code.
 */
.L_strcmp_unaligned:
        wp1 .req r0
        wp2 .req r1
        b1  .req r2
        w1  .req r4
        w2  .req r5
        t1  .req ip
        @ r3 is scratch

2:
        mov     b1, #1
        orr     b1, b1, b1, lsl #8
        orr     b1, b1, b1, lsl #16
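
/* b1 now holds the magic constant 0x01010101, used below in the classic
   zero-byte test (x - 0x01010101) & ~x & 0x80808080. Illustration with a
   hypothetical word x = 0x61006263 (one zero byte): x - b1 = 0x5fff6162,
   ~x = 0x9eff9d9c, and ANDing both with 0x80808080 (b1, lsl #7) leaves
   0x00800000, flagging the zero byte; the syndrome is zero exactly when
   no byte of x is zero. */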

        and     t1, wp2, #3
        bic     wp2, wp2, #3
        ldr     w1, [wp1], #4
        ldr     w2, [wp2], #4
        cmp     t1, #2
        beq     2f
        bhi     3f
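
/* Dispatch note: t1 is the byte offset of the second string within a word
   (1, 2, or 3; offset 0 never reaches this path). Offset 2 branches to
   the two-byte-overlap block at 2f, offset 3 to the one-byte-overlap
   block at 3f, and offset 1 falls through to the three-byte-overlap
   block. */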

        /* Critical inner loop: block with 3 bytes initial overlap. */
        .p2align 2
1:
        bic     t1, w1, #MSB
        cmp     t1, w2, S2LOMEM #8
        sub     r3, w1, b1
        bic     r3, r3, w1
        bne     4f
        ands    r3, r3, b1, lsl #7
        it      eq
        ldreq   w2, [wp2], #4
        bne     5f
        eor     t1, t1, w1
        cmp     t1, w2, S2HIMEM #24
        bne     6f
        ldr     w1, [wp1], #4
        b       1b
4:
        S2LOMEM w2, w2, #8
        b       8f

5:
#ifdef __ARMEB__
        /* The syndrome value may contain false ones if the string ends
           with the bytes 0x01 0x00. */
        tst     w1, #0xff000000
        itt     ne
        tstne   w1, #0x00ff0000
        tstne   w1, #0x0000ff00
        beq     7f
#else
        bics    r3, r3, #0xff000000
        bne     7f
#endif
        ldrb    w2, [wp2]
        S2LOMEM t1, w1, #24
#ifdef __ARMEB__
        lsl     w2, w2, #24
#endif
        b       8f

6:
        S2LOMEM t1, w1, #24
        and     w2, w2, #LSB
        b       8f

        /* Critical inner loop: block with 2 bytes initial overlap. */
        .p2align 2
2:
        S2HIMEM t1, w1, #16
        sub     r3, w1, b1
        S2LOMEM t1, t1, #16
        bic     r3, r3, w1
        cmp     t1, w2, S2LOMEM #16
        bne     4f
        ands    r3, r3, b1, lsl #7
        it      eq
        ldreq   w2, [wp2], #4
        bne     5f
        eor     t1, t1, w1
        cmp     t1, w2, S2HIMEM #16
        bne     6f
        ldr     w1, [wp1], #4
        b       2b

5:
#ifdef __ARMEB__
        /* The syndrome value may contain false ones if the string ends
           with the bytes 0x01 0x00. */
        tst     w1, #0xff000000
        it      ne
        tstne   w1, #0x00ff0000
        beq     7f
#else
        lsls    r3, r3, #16
        bne     7f
#endif
        ldrh    w2, [wp2]
        S2LOMEM t1, w1, #16
#ifdef __ARMEB__
        lsl     w2, w2, #16
#endif
        b       8f

6:
        S2HIMEM w2, w2, #16
        S2LOMEM t1, w1, #16
4:
        S2LOMEM w2, w2, #16
        b       8f

        /* Critical inner loop: block with 1 byte initial overlap. */
        .p2align 2
3:
        and     t1, w1, #LSB
        cmp     t1, w2, S2LOMEM #24
        sub     r3, w1, b1
        bic     r3, r3, w1
        bne     4f
        ands    r3, r3, b1, lsl #7
        it      eq
        ldreq   w2, [wp2], #4
        bne     5f
        eor     t1, t1, w1
        cmp     t1, w2, S2HIMEM #8
        bne     6f
        ldr     w1, [wp1], #4
        b       3b
4:
        S2LOMEM w2, w2, #24
        b       8f
5:
        /* The syndrome value may contain false ones if the string ends
           with the bytes 0x01 0x00. */
        tst     w1, #LSB
        beq     7f
        ldr     w2, [wp2], #4
6:
        S2LOMEM t1, w1, #8
        bic     w2, w2, #MSB
        b       8f
7:
        mov     r0, #0

        /* Restore registers and stack. */
        ldrd    r6, r7, [sp]
        ldrd    r4, r5, [sp, #8]
        adds    sp, sp, #16
        .cfi_def_cfa_offset 0
        .cfi_restore r4
        .cfi_restore r5
        .cfi_restore r6
        .cfi_restore r7

        bx      lr
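
/* The block at 8 below receives the remaining unmatched bytes of both
   strings packed into t1 (first string) and w2 (second string), with the
   next byte in memory order isolated by the LSB mask, and finishes the
   comparison one byte at a time. */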

8:
        and     r2, t1, #LSB
        and     r0, w2, #LSB
        cmp     r0, #1
        it      cs
        cmpcs   r0, r2
        itt     eq
        S2LOMEMEQ t1, t1, #8
        S2LOMEMEQ w2, w2, #8
        beq     8b
        sub     r0, r2, r0

        /* Restore registers and stack. */
        ldrd    r6, r7, [sp]
        ldrd    r4, r5, [sp, #8]
        adds    sp, sp, #16

        bx      lr
        .cfi_endproc
END(strcmp)