powerpc: Optimized strcmp for POWER8/PPC64
Commit Message
This patch adds an optimized POWER8 strcmp using unaligned accesses.
The algorithm first checks the initial 16 bytes, then aligns the first
source argument and uses unaligned loads on the second argument only.
Additional checks for page boundaries are done for the unaligned case
(where the sources' alignments differ).
For the unaligned case I also tried a different approach: aligning
the second source argument to doubleword and issuing aligned loads
plus internal shifts/masks to isolate the bytes to compare.
However, due to the additional control flow and compares (one for each
split doubleword read), I did not see improvements over the
strategy used in this patch. Also, the alignment check is
simpler, resulting in shorter code.
Tested on powerpc64 and powerpc64le.
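For reference, the strategy can be sketched in C roughly as follows (an
illustrative model only, not the submitted code: strcmp_model and
may_cross_page are made-up names, and the real assembly byte-compares
only until s1 reaches the page boundary instead of re-testing the
predicate on every byte):

#include <stdint.h>
#include <string.h>

#define PAGE_SIZE 4096
#define ITER_SIZE 16

/* Nonzero if a 16-byte unaligned read starting at P may cross a page
   boundary; the same predicate the assembly uses.  */
static int
may_cross_page (const char *p)
{
  return ((uintptr_t) p % PAGE_SIZE) > (PAGE_SIZE - ITER_SIZE);
}

int
strcmp_model (const char *s1, const char *s2)
{
  for (;;)
    {
      /* Near a page boundary a speculative wide read could fault, so
         fall back to byte-by-byte comparison there.  */
      if (may_cross_page (s1) || may_cross_page (s2))
        {
          unsigned char c1 = (unsigned char) *s1;
          unsigned char c2 = (unsigned char) *s2;
          if (c1 != c2 || c1 == '\0')
            return c1 - c2;
          s1++;
          s2++;
          continue;
        }
      /* Otherwise compare a doubleword at a time; memcpy models the
         unaligned 8-byte loads.  */
      uint64_t w1, w2;
      memcpy (&w1, s1, 8);
      memcpy (&w2, s2, 8);
      if (w1 != w2 || memchr (s1, '\0', 8) != NULL)
        for (int i = 0; i < 8; i++)
          {
            unsigned char c1 = (unsigned char) s1[i];
            unsigned char c2 = (unsigned char) s2[i];
            if (c1 != c2 || c1 == '\0')
              return c1 - c2;
          }
      s1 += 8;
      s2 += 8;
    }
}

The speculative 8-byte read is safe whenever it cannot run into the
next (possibly unmapped) page, which is all the page-cross predicate
has to guarantee.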
--
* sysdeps/powerpc/powerpc64/multiarch/Makefile [sysdep_routines]:
Add strcmp-power8 object.
* sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
(__libc_ifunc_impl_list): Add __strcmp_power8 implementation.
* sysdeps/powerpc/powerpc64/multiarch/strcmp-power8.S: New file.
* sysdeps/powerpc/powerpc64/multiarch/strcmp.c (strcmp): Add
__strcmp_power8 implementation.
* sysdeps/powerpc/powerpc64/power8/strcmp.S: New file.
* NEWS: Update.
--
Comments
On 07-01-2015 14:30, Ondrej Bilka wrote:
>> + /* For short string up to 16 bytes, load both s1 and s2 using
>> + unaligned dwords and compare. */
>> + ld r8,0(r3)
>> + ld r10,0(r4)
>> + li r9,0
>> + cmpb r7,r8,r9
>> + cmpdi cr7,r7,0
>> + mr r9,r7
>> + bne cr7,L(null_found)
>> + cmpld cr7,r8,r10
>> + bne cr7,L(different)
>> + ld r8,8(r3)
>> + ld r10,8(r4)
>> + cmpb r9,r8,r7
>> + cmpdi cr7,r9,0
>> + bne cr7,L(null_found)
>> + cmpld cr7,r8,r10
>> + bne cr7,L(different)
>> + addi r7,r3,16
>> + addi r4,r4,16
> It makes no sense to do two separate checks, which create pretty unpredictable branches.
>
> Just OR these two checks and look at the first nonzero byte. Either they differ at that offset, or both are zero, and you easily get the result from that.
>
Which two checks are you referring to exactly? The first two:
+ ld r8,0(r3)
+ ld r10,0(r4)
+ li r9,0
+ cmpb r7,r8,r9
+ cmpdi cr7,r7,0
+ mr r9,r7
+ bne cr7,L(null_found)
+ cmpld cr7,r8,r10
+ bne cr7,L(different)
The first cmpb instruction is not a branch instruction (and thus has no effect
on branch prediction). Also, this code has to check for '\0' first, before
starting to check for different bytes in the second dword. For instance, for
the strings:
source1:
0x3fffb7fbffe8: 0x01 0x18 0x2f 0x46 0x5d 0x74 0x0c 0x23
0x3fffb7fbfff0: 0x00 0x17 0xa5 0xa5 0xa5 0xa5 0xa5 0xa5
source2:
0x3fffb7f7ffe8: 0x01 0x18 0x2f 0x46 0x5d 0x74 0x0c 0x23
0x3fffb7f7fff0: 0x00 0x18 0x5a 0x5a 0x5a 0x5a 0x5a 0x5a
If the first '\0' check is omitted, the second check (0x3fffb7fbfff0 vs
0x3fffb7f7fff0) will indicate different strings, handled by the L(different)
label, and the result would be -1 (0x17 - 0x18) instead of the intended 0.
I could add another '\0' check at the L(different) label, but that would just
move the branch elsewhere, not eliminate it.
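The point is easy to reproduce in C with the exact bytes above (a
standalone test for illustration, not part of the patch):

#include <assert.h>
#include <string.h>

int
main (void)
{
  const unsigned char s1[16] = { 0x01, 0x18, 0x2f, 0x46, 0x5d, 0x74,
                                 0x0c, 0x23, 0x00, 0x17, 0xa5, 0xa5,
                                 0xa5, 0xa5, 0xa5, 0xa5 };
  const unsigned char s2[16] = { 0x01, 0x18, 0x2f, 0x46, 0x5d, 0x74,
                                 0x0c, 0x23, 0x00, 0x18, 0x5a, 0x5a,
                                 0x5a, 0x5a, 0x5a, 0x5a };

  /* The second doublewords differ (0x17 vs 0x18 at offset 9), but both
     strings already ended at offset 8, so strcmp must return 0; a
     dword inequality check done before the '\0' check would yield -1.  */
  assert (strcmp ((const char *) s1, (const char *) s2) == 0);
  return 0;
}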
On Wed, Jan 07, 2015 at 03:10:25PM -0200, Adhemerval Zanella wrote:
> On 07-01-2015 14:30, Ondrej Bilka wrote:
> [... full quote of the exchange above trimmed ...]
>
No, I am referring to
bne cr7,L(null_found)
and
bne cr7,L(different)
You don't need two nearly identical branches; just create a mask that
detects both zero and difference.
On x64 the first 16 bytes are handled using this trick; you could replace
the bytewise minimum there with a bytewise and.
pxor %xmm2, %xmm2
movdqu (%rdi), %xmm1
movdqu (%rsi), %xmm0
pcmpeqb %xmm1, %xmm0
pminub %xmm1, %xmm0
pcmpeqb %xmm2, %xmm0
pmovmskb %xmm0, %eax
testq %rax, %rax
je L(next_48_bytes)
L(return):
bsfq %rax, %rdx
movzbl (%rdi, %rdx), %eax
movzbl (%rsi, %rdx), %edx
subl %edx, %eax
ret
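For what it's worth, the suggested trick can be written out in scalar C
(sketch only: cmpb_model, zero_or_diff and first_lane are made-up
helpers; cmpb_model stands in for the POWER cmpb instruction and
__builtin_ctzll, a GCC builtin, for the bsfq above):

#include <stdint.h>

/* Model of cmpb: 0xFF in every byte lane where A and B hold equal
   bytes, 0x00 elsewhere.  */
static uint64_t
cmpb_model (uint64_t a, uint64_t b)
{
  uint64_t r = 0;
  for (int i = 0; i < 64; i += 8)
    if (((a >> i) & 0xFF) == ((b >> i) & 0xFF))
      r |= 0xFFULL << i;
  return r;
}

/* One mask that is nonzero iff W1 contains a '\0' or the doublewords
   differ; the scalar analogue of the pcmpeqb/pminub/pcmpeqb sequence
   above.  */
static uint64_t
zero_or_diff (uint64_t w1, uint64_t w2)
{
  return cmpb_model (w1, 0) | ~cmpb_model (w1, w2);
}

/* Byte index of the first flagged lane in little-endian lane order,
   the analogue of the bsfq above; MASK must be nonzero.  */
static int
first_lane (uint64_t mask)
{
  return __builtin_ctzll (mask) / 8;
}

A single branch on zero_or_diff () being nonzero then replaces the two
separate '\0' and difference branches.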
On 07-01-2015 17:32, Ondřej Bílka wrote:
> On Wed, Jan 07, 2015 at 03:10:25PM -0200, Adhemerval Zanella wrote:
>> [... earlier quotes trimmed ...]
>>
> No, I am referring to
> bne cr7,L(null_found)
> and
> bne cr7,L(different)
>
> You don't need two nearly identical branches; just create a mask that
> detects both zero and difference.
>
> On x64 the first 16 bytes are handled using this trick; you could replace
> the bytewise minimum there with a bytewise and.
>
> pxor %xmm2, %xmm2
> movdqu (%rdi), %xmm1
> movdqu (%rsi), %xmm0
> pcmpeqb %xmm1, %xmm0
> pminub %xmm1, %xmm0
> pcmpeqb %xmm2, %xmm0
> pmovmskb %xmm0, %eax
> testq %rax, %rax
> je L(next_48_bytes)
> L(return):
> bsfq %rax, %rdx
> movzbl (%rdi, %rdx), %eax
> movzbl (%rsi, %rdx), %edx
> subl %edx, %eax
> ret
>
I see your point now; I have changed it to:
/* For short strings up to 16 bytes, load both s1 and s2 using
unaligned dwords and compare. */
ld r8,0(r3)
ld r10,0(r4)
li r0,0
cmpb r12,r8,r0
cmpb r11,r8,r10
orc. r9,r12,r11
bne cr0,L(different_nocmpb)
ld r8,8(r3)
ld r10,8(r4)
cmpb r12,r8,r0
cmpb r11,r8,r10
orc. r9,r12,r11
bne cr0,L(different_nocmpb)
addi r7,r3,16
addi r4,r4,16
b L(align_16b)
[...]
L(different):
cmpb r9,r8,r10
#ifdef __LITTLE_ENDIAN__
nor r9,r9,r9
L(different_nocmpb):
neg r3,r9
and r9,r9,r3
cntlzd r9,r9
subfic r9,r9,63
#else
not r9,r9
L(different_nocmpb):
cntlzd r9,r9
subfic r9,r9,56
#endif
srd r3,r8,r9
srd r10,r10,r9
rldicl r10,r10,0,56
rldicl r3,r3,0,56
subf r3,r10,r3
blr
And after tuning some loop alignments I see a few cycles being saved.
Thanks for the tip.
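In C terms, the little-endian tail after L(different_nocmpb) computes
roughly the following (sketch only: resolve_diff_le is a made-up name,
and mask is assumed to have 0xFF in each byte lane that is '\0' in w1
or differs between w1 and w2):

#include <stdint.h>

/* The neg/and pair isolates the lowest set bit of MASK, cntlzd/subfic
   turn it into a shift count (a multiple of 8, since lanes are 0x00 or
   0xFF), and srd/rldicl extract the bytes to subtract.  MASK must be
   nonzero.  */
static int
resolve_diff_le (uint64_t w1, uint64_t w2, uint64_t mask)
{
  int shift = __builtin_ctzll (mask & -mask); /* neg/and + cntlzd/subfic */
  unsigned char c1 = (w1 >> shift) & 0xFF;    /* srd + rldicl on r8 */
  unsigned char c2 = (w2 >> shift) & 0xFF;    /* srd + rldicl on r10 */
  return c1 - c2;                             /* subf */
}

For the example strings earlier in the thread, the first flagged lane
of the second doubleword is the shared '\0', so the result is 0 as
intended.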
@@ -1,6 +1,16 @@
2015-01-07 Adhemerval Zanella <azanella@linux.vnet.ibm.com>
* sysdeps/powerpc/powerpc64/multiarch/Makefile [sysdep_routines]:
+ Add strcmp-power8 object.
+ * sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
+ (__libc_ifunc_impl_list): Add __strcmp_power8 implementation.
+ * sysdeps/powerpc/powerpc64/multiarch/strcmp-power8.S: New file.
+ * sysdeps/powerpc/powerpc64/multiarch/strcmp.c (strcmp): Add
+ __strcmp_power8 implementation.
+ * sysdeps/powerpc/powerpc64/power8/strcmp.S: New file.
+ * NEWS: Update.
+
+ * sysdeps/powerpc/powerpc64/multiarch/Makefile [sysdep_routines]:
Add strncpy-power8 and stpncpy-power8 objects.
* sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
(__libc_ifunc_impl_list): Add __strncpy_power8 and stpncpy_power8
@@ -19,6 +19,9 @@ Version 2.21
17744, 17745, 17746, 17747, 17775, 17777, 17780, 17781, 17782, 17793,
17796, 17797, 17806
+* Optimized strcmp implementation for powerpc64/powerpc64le.
+ Implemented by Adhemerval Zanella.
+
* Optimized strcpy, stpcpy, strchrnul and strrchr implementations for
AArch64. Contributed by ARM Ltd.
@@ -18,7 +18,7 @@ sysdep_routines += memcpy-power7 memcpy-a2 memcpy-power6 memcpy-cell \
strrchr-power7 strrchr-ppc64 strncat-power7 strncat-ppc64 \
strncpy-power7 strncpy-ppc64 \
stpncpy-power8 stpncpy-power7 stpncpy-ppc64 \
- strcmp-power7 strcmp-ppc64 \
+ strcmp-power8 strcmp-power7 strcmp-ppc64 \
strcat-power8 strcat-power7 strcat-ppc64 memmove-power7 \
memmove-ppc64 bcopy-ppc64 strncpy-power8
@@ -301,6 +301,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/powerpc/powerpc64/multiarch/strcmp.c. */
IFUNC_IMPL (i, name, strcmp,
IFUNC_IMPL_ADD (array, i, strcmp,
+ hwcap2 & PPC_FEATURE2_ARCH_2_07,
+ __strcmp_power8)
+ IFUNC_IMPL_ADD (array, i, strcmp,
hwcap & PPC_FEATURE_HAS_VSX,
__strcmp_power7)
IFUNC_IMPL_ADD (array, i, strcmp, 1,
new file mode 100644
@@ -0,0 +1,40 @@
+/* Optimized strcmp implementation for POWER8/PPC64.
+ Copyright (C) 2014-2015 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+#undef EALIGN
+#define EALIGN(name, alignt, words) \
+ .section ".text"; \
+ ENTRY_2(__strcmp_power8) \
+ .align ALIGNARG(alignt); \
+ EALIGN_W_##words; \
+ BODY_LABEL(__strcmp_power8): \
+ cfi_startproc; \
+ LOCALENTRY(__strcmp_power8)
+
+#undef END
+#define END(name) \
+ cfi_endproc; \
+ TRACEBACK(__strcmp_power8) \
+ END_2(__strcmp_power8)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc64/power8/strcmp.S>
@@ -23,9 +23,12 @@
extern __typeof (strcmp) __strcmp_ppc attribute_hidden;
extern __typeof (strcmp) __strcmp_power7 attribute_hidden;
+extern __typeof (strcmp) __strcmp_power8 attribute_hidden;
libc_ifunc (strcmp,
- (hwcap & PPC_FEATURE_HAS_VSX)
- ? __strcmp_power7
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __strcmp_power8 :
+ (hwcap & PPC_FEATURE_HAS_VSX)
+ ? __strcmp_power7
: __strcmp_ppc);
#endif
new file mode 100644
@@ -0,0 +1,263 @@
+/* Optimized strcmp implementation for PowerPC64/POWER8.
+ Copyright (C) 2015 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+/* Implements the function
+
+ int [r3] strcmp (const char *s1 [r3], const char *s2 [r4])
+
+ The implementation uses unaligned doubleword access to avoid specialized
+ code paths depending on data alignment. Although recent powerpc64 kernels
+ use a 64K page size by default, the page cross handling assumes a
+ minimum page size of 4K. */
+
+EALIGN (strcmp, 4, 0)
+ /* Check if [s1]+15 or [s2]+15 will cross a 4K page boundary using
+ the code:
+
+ (((size_t) s1) % PAGE_SIZE > (PAGE_SIZE - ITER_SIZE))
+
+ with PAGE_SIZE being 4096 and ITER_SIZE being 16. */
+
+ rldicl r7,r3,0,52
+ rldicl r9,r4,0,52
+ cmpldi cr7,r7,4080
+ bgt cr7,L(pagecross_check)
+ cmpldi cr7,r9,4080
+ bgt cr7,L(pagecross_check)
+
+ /* For short strings up to 16 bytes, load both s1 and s2 using
+ unaligned dwords and compare. */
+ ld r8,0(r3)
+ ld r10,0(r4)
+ li r9,0
+ cmpb r7,r8,r9
+ cmpdi cr7,r7,0
+ mr r9,r7
+ bne cr7,L(null_found)
+ cmpld cr7,r8,r10
+ bne cr7,L(different)
+ ld r8,8(r3)
+ ld r10,8(r4)
+ cmpb r9,r8,r7
+ cmpdi cr7,r9,0
+ bne cr7,L(null_found)
+ cmpld cr7,r8,r10
+ bne cr7,L(different)
+ addi r7,r3,16
+ addi r4,r4,16
+ b L(align_16b)
+
+ /* A zero byte was found in r8 (the s1 dword); r9 contains the cmpb
+ result and r10 the dword from s2. The code isolates the bytes up
+ to the end (including the '\0') by masking the remaining ones
+ with 0xFF:
+
+ #if __LITTLE_ENDIAN__
+ (__builtin_ffsl (x) - 1) = counting trailing zero bits
+ r9 = (__builtin_ffsl (r9) - 1) + 8;
+ r9 = -1UL << r9
+ #else
+ r9 = __builtin_clzl (r9) + 8;
+ r9 = -1UL >> r9
+ #endif
+ r8 = r8 | r9
+ r10 = r10 | r9 */
+L(null_found):
+#ifdef __LITTLE_ENDIAN__
+ neg r7,r9
+ and r9,r9,r7
+ li r7,-1
+ cntlzd r9,r9
+ subfic r9,r9,71
+ sld r9,r7,r9
+#else
+ cntlzd r9,r9
+ li r7,-1
+ addi r9,r9,8
+ srd r9,r7,r9
+#endif
+ or r8,r8,r9
+ or r10,r10,r9
+
+ /* The code now checks if r8 and r10 differ by issuing a
+ cmpb and shifting the result based on its output:
+
+ r9 = ~ (cmpb (r8, r10));
+ #ifdef __LITTLE_ENDIAN__
+ r9 = (__builtin_ffsl (r9) - 1);
+ r10 = (r10 >> r9) & 0xFF;
+ r3 = ( r8 >> r9) & 0xFF;
+ #else
+ r9 = __builtin_clzl (r9);
+ r10 = (r10 >> (56 - r9)) & 0xFF;
+ r3 = ( r8 >> (56 - r9)) & 0xFF;
+ #endif
+ return r3 - r10 */
+L(different):
+ cmpb r9,r8,r10
+#ifdef __LITTLE_ENDIAN__
+ addi r7,r9,1
+ andc r9,r7,r9
+ cntlzd r9,r9
+ subfic r9,r9,63
+#else
+ not r9,r9
+ cntlzd r9,r9
+ subfic r9,r9,56
+#endif
+ srd r3,r8,r9
+ srd r10,r10,r9
+ rldicl r10,r10,0,56
+ rldicl r3,r3,0,56
+ subf r3,r10,r3
+ blr
+
+ .align 4
+L(align_16b):
+ /* The first 16 bytes have now been checked; align source1 to
+ doubleword and adjust the source2 address. */
+ rldicl r9,r7,0,61 /* source1 alignment to doubleword */
+ li r6,0
+ subf r4,r9,r4 /* Adjust source2 address based on source1
+ alignment. */
+ rldicr r7,r7,0,60 /* Align source1 to doubleword. */
+
+ /* At this point, source1 alignment is 0 and source2 alignment is
+ between 0 and 7. Check if source2 alignment is 0, meaning both
+ sources have the same alignment. */
+ andi. r9,r4,0x7
+ bne cr0,L(loop_diff_align)
+
+ /* If both source1 and source2 are doubleword aligned, there is no
+ need for page boundary cross checks. */
+ ld r8,0(r7)
+ ld r10,0(r4)
+ cmpb r9,r8,r9
+ cmpdi cr7,r9,0
+ bne cr7,L(null_found)
+ cmpld cr7,r10,r8
+ bne cr7,L(different)
+ li r6,0
+ b L(loop_equal_align)
+
+ .align 4
+L(loop_equal_align_0):
+ bne cr5,L(different)
+L(loop_equal_align):
+ ldu r8,8(r7)
+ ldu r10,8(r4)
+ cmpb r9,r8,r6
+ cmpdi cr7,r9,0
+ cmpld cr5,r8,r10
+ beq cr7,L(loop_equal_align_0)
+ b L(null_found)
+
+ .align 4
+L(pagecross_check):
+ subfic r9,r9,4096
+ subfic r7,r7,4096
+ cmpld cr7,r7,r9
+ bge cr7,L(pagecross)
+ mr r7,r9
+
+ /* If the unaligned 16-byte read crosses a 4K page boundary, a
+ simple byte-by-byte comparison is used until the page alignment
+ for s1 is reached. */
+L(pagecross):
+ add r7,r3,r7
+ subf r9,r3,r7
+ mtctr r9
+
+ .align 4
+L(pagecross_loop):
+ /* Load a byte from s1 and s2, check whether *s1 equals *s2
+ and whether *s1 is '\0'. */
+ lbz r9,0(r3)
+ lbz r10,0(r4)
+ addi r3,r3,1
+ addi r4,r4,1
+ cmplw cr7,r9,r10
+ cmpdi cr5,r9,0
+ bne cr7,L(pagecross_ne)
+ beq cr5,L(pagecross_nullfound)
+ bdnz L(pagecross_loop)
+ b L(align_16b)
+
+ .align 4
+ /* The unaligned read of source2 will cross a 4K page boundary,
+ and the differing byte or '\0' may be in the remaining page
+ bytes. In this case, since it cannot use the unaligned load,
+ the algorithm reads and compares 8 bytes one at a time to keep
+ source1 doubleword aligned. */
+L(check_source2_byte):
+ li r9,8
+ mtctr r9
+
+ .align 4
+L(check_source2_byte_loop):
+ lbz r9,0(r7)
+ lbz r10,0(r4)
+ addi r7,r7,1
+ addi r4,r4,1
+ cmplw cr7,r9,r10
+ cmpdi cr5,r9,0
+ bne cr7,L(pagecross_ne)
+ beq cr5,L(pagecross_nullfound)
+ bdnz L(check_source2_byte_loop)
+
+ /* If source2 is unaligned to doubleword, the code needs to check
+ on each iteration whether the unaligned doubleword access will
+ cross a 4K page boundary. */
+ .align 4
+L(loop_unaligned):
+ ld r8,0(r7)
+ ld r10,0(r4)
+ cmpb r9,r8,r6
+ cmpdi cr7,r9,0
+ bne cr7,L(null_found)
+ cmpld cr7,r8,r10
+ bne cr7,L(different)
+ addi r7,r7,8
+ addi r4,r4,8
+
+L(loop_diff_align):
+ /* Check if [src2]+8 crosses a 4K page boundary:
+
+ src2 % PAGE_SIZE > (PAGE_SIZE - 8)
+
+ with PAGE_SIZE being 4096. */
+ rldicl r9,r4,0,52
+ cmpldi cr7,r9,4088
+ ble cr7,L(loop_unaligned)
+ b L(check_source2_byte)
+
+ .align 4
+L(pagecross_ne):
+ extsw r3,r9
+ mr r9,r10
+L(pagecross_retdiff):
+ subf r9,r9,r3
+ extsw r3,r9
+ blr
+
+ .align 4
+L(pagecross_nullfound):
+ li r3,0
+ b L(pagecross_retdiff)
+END (strcmp)
+libc_hidden_builtin_def (strcmp)
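As a footnote, the little-endian L(null_found) masking above can be
modeled in C as follows (sketch only: mask_past_nul_le is a made-up
name, and nulmask is assumed to be the cmpb zero-byte mask of w1):

#include <stdint.h>

/* Set every byte after the first '\0' of W1 to 0xFF in both
   doublewords, so the following difference check cannot trigger on
   bytes past the terminator.  NULMASK must be nonzero.  */
static void
mask_past_nul_le (uint64_t *w1, uint64_t *w2, uint64_t nulmask)
{
  /* neg/and plus cntlzd/subfic-71 compute 8 * (lane + 1) in the
     assembly.  */
  int shift = __builtin_ctzll (nulmask) + 8;
  /* sld of -1 by 64 or more yields 0 in hardware; C needs a guard.  */
  uint64_t fill = shift >= 64 ? 0 : ~0ULL << shift;
  *w1 |= fill;
  *w2 |= fill;
}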