[v1,2/3] x86: Optimize memcmp-avx2-movbe.S
Commit Message
No bug. This commit optimizes memcmp-avx2-movbe.S. The optimizations include
adding a new vec compare path for small sizes, reorganizing the entry
control flow, and removing some unnecessary ALU instructions from the
main loop. test-memcmp and test-wmemcmp are both passing.
Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
---
sysdeps/x86_64/multiarch/ifunc-memcmp.h | 1 +
sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S | 676 +++++++++++--------
2 files changed, 396 insertions(+), 281 deletions(-)
Comments
On Mon, May 17, 2021 at 02:44:07PM -0400, Noah Goldstein wrote:
> No bug. This commit optimizes memcmp-avx2-movbe.S. The optimizations include
> adding a new vec compare path for small sizes, reorganizing the entry
> control flow, and removing some unnecessary ALU instructions from the
> main loop. test-memcmp and test-wmemcmp are both passing.
>
> Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
> ---
> sysdeps/x86_64/multiarch/ifunc-memcmp.h | 1 +
> sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S | 676 +++++++++++--------
> 2 files changed, 396 insertions(+), 281 deletions(-)
>
> diff --git a/sysdeps/x86_64/multiarch/ifunc-memcmp.h b/sysdeps/x86_64/multiarch/ifunc-memcmp.h
> index 8bee1aff75..89e2129968 100644
> --- a/sysdeps/x86_64/multiarch/ifunc-memcmp.h
> +++ b/sysdeps/x86_64/multiarch/ifunc-memcmp.h
> @@ -33,6 +33,7 @@ IFUNC_SELECTOR (void)
>
> if (CPU_FEATURE_USABLE_P (cpu_features, AVX2)
> && CPU_FEATURE_USABLE_P (cpu_features, MOVBE)
> + && CPU_FEATURE_USABLE_P (cpu_features, BMI2)
Please also update ifunc-impl-list.c to require BMI2 for AVX2/EVEX
memcmp.
> && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
> {
> if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
> diff --git a/sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S b/sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S
> index ad0fa962a1..2621ec907a 100644
> --- a/sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S
> +++ b/sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S
> @@ -19,17 +19,23 @@
> #if IS_IN (libc)
>
> /* memcmp/wmemcmp is implemented as:
> - 1. For size from 2 to 7 bytes, load as big endian with movbe and bswap
> - to avoid branches.
> - 2. Use overlapping compare to avoid branch.
> - 3. Use vector compare when size >= 4 bytes for memcmp or size >= 8
> - bytes for wmemcmp.
> - 4. If size is 8 * VEC_SIZE or less, unroll the loop.
> - 5. Compare 4 * VEC_SIZE at a time with the aligned first memory
> + 1. Use ymm vector compares when possible. The only case where
> + vector compares are not possible is when size < VEC_SIZE
> + and loading from either s1 or s2 would cause a page cross.
> + 2. For size from 2 to 7 bytes on page cross, load as big endian
> + with movbe and bswap to avoid branches.
> + 3. Use xmm vector compare when size >= 4 bytes for memcmp or
> + size >= 8 bytes for wmemcmp.
> + 4. Optimistically compare up to the first 4 * VEC_SIZE, one VEC
> + at a time, to check for early mismatches. Only do this if it's
> + guaranteed the work is not wasted.
> + 5. If size is 8 * VEC_SIZE or less, unroll the loop.
> + 6. Compare 4 * VEC_SIZE at a time with the aligned first memory
> area.
> - 6. Use 2 vector compares when size is 2 * VEC_SIZE or less.
> - 7. Use 4 vector compares when size is 4 * VEC_SIZE or less.
> - 8. Use 8 vector compares when size is 8 * VEC_SIZE or less. */
> + 7. Use 2 vector compares when size is 2 * VEC_SIZE or less.
> + 8. Use 4 vector compares when size is 4 * VEC_SIZE or less.
> + 9. Use 8 vector compares when size is 8 * VEC_SIZE or less. */
> +
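[Note for readers: a rough C rendering of the reorganized entry flow
described above, not part of the patch. A 32-byte memcmp stands in for
one vmovdqu/VPCMPEQ/vpmovmskb/incl check, and return-value conventions
are simplified to memcmp's sign contract:]

    #include <string.h>

    static int
    entry_flow (const unsigned char *s1, const unsigned char *s2, size_t n)
    {
      enum { VEC = 32 };
      int r;
      if (n < VEC)                              /* L(less_vec) */
        return memcmp (s1, s2, n);
      if ((r = memcmp (s1, s2, VEC)) != 0)      /* VEC 0: checked for every n >= VEC */
        return r;                               /* L(return_vec_0) */
      if (n <= 2 * VEC)                         /* L(last_1x_vec): overlapping tail */
        return memcmp (s1 + n - VEC, s2 + n - VEC, VEC);
      if ((r = memcmp (s1 + VEC, s2 + VEC, VEC)) != 0)
        return r;                               /* L(return_vec_1) */
      if (n <= 4 * VEC)                         /* L(last_2x_vec): two overlapping tails */
        {
          if ((r = memcmp (s1 + n - 2 * VEC, s2 + n - 2 * VEC, VEC)) != 0)
            return r;
          return memcmp (s1 + n - VEC, s2 + n - VEC, VEC);
        }
      /* VEC 2/3 checks, then the branchless 4x tail or the aligned
         loop; a semantically equivalent stand-in: */
      return memcmp (s1 + 2 * VEC, s2 + 2 * VEC, n - 2 * VEC);
    }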
>
> # include <sysdep.h>
>
> @@ -38,8 +44,10 @@
> # endif
>
> # ifdef USE_AS_WMEMCMP
> +# define CHAR_SIZE 4
> # define VPCMPEQ vpcmpeqd
> # else
> +# define CHAR_SIZE 1
> # define VPCMPEQ vpcmpeqb
> # endif
>
> @@ -52,7 +60,7 @@
> # endif
>
> # define VEC_SIZE 32
> -# define VEC_MASK ((1 << VEC_SIZE) - 1)
> +# define PAGE_SIZE 4096
>
> /* Warning!
> wmemcmp has to use SIGNED comparison for elements.
> @@ -71,136 +79,359 @@ ENTRY (MEMCMP)
> jb L(less_vec)
>
> /* From VEC to 2 * VEC. No branch when size == VEC_SIZE. */
> - vmovdqu (%rsi), %ymm2
> - VPCMPEQ (%rdi), %ymm2, %ymm2
> - vpmovmskb %ymm2, %eax
> - subl $VEC_MASK, %eax
> - jnz L(first_vec)
> + vmovdqu (%rsi), %ymm1
> + VPCMPEQ (%rdi), %ymm1, %ymm1
> + vpmovmskb %ymm1, %eax
> + /* NB: eax must be the destination register if going to
> + L(return_vec_[0,2]). For L(return_vec_3) the destination
> + register must be ecx. */
> + incl %eax
> + jnz L(return_vec_0)
>
> cmpq $(VEC_SIZE * 2), %rdx
> - jbe L(last_vec)
> -
> - VPCMPEQ %ymm0, %ymm0, %ymm0
> - /* More than 2 * VEC. */
> - cmpq $(VEC_SIZE * 8), %rdx
> - ja L(more_8x_vec)
> - cmpq $(VEC_SIZE * 4), %rdx
> - jb L(last_4x_vec)
> -
> - /* From 4 * VEC to 8 * VEC, inclusively. */
> - vmovdqu (%rsi), %ymm1
> - VPCMPEQ (%rdi), %ymm1, %ymm1
> + jbe L(last_1x_vec)
>
> + /* Check second VEC no matter what. */
> vmovdqu VEC_SIZE(%rsi), %ymm2
> - VPCMPEQ VEC_SIZE(%rdi), %ymm2, %ymm2
> + VPCMPEQ VEC_SIZE(%rdi), %ymm2, %ymm2
> + vpmovmskb %ymm2, %eax
> + /* If all 4 VEC were equal, eax will be all 1s, so incl will
> + overflow and set the zero flag. */
> + incl %eax
> + jnz L(return_vec_1)
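[Note: the incl does double duty here and at every mask check in this
file. Adding 1 to the vpmovmskb equality mask clears the trailing run
of 1s (the equal bytes) and leaves the bit of the first mismatch set,
so the very same value later feeds tzcnt in the return paths. A C
equivalent, not part of the patch:]

    #include <stdint.h>

    /* Index of the first unequal byte given a vpmovmskb equality
       mask (bit i == 1 iff byte i compared equal), or 32 if none.  */
    static unsigned int
    first_diff (uint32_t mask)
    {
      uint32_t v = mask + 1;       /* incl %eax */
      if (v == 0)                  /* all 32 bytes were equal */
        return 32;
      return __builtin_ctz (v);    /* tzcntl %eax, %eax */
    }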
>
> - vmovdqu (VEC_SIZE * 2)(%rsi), %ymm3
> - VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm3, %ymm3
> + /* Less than 4 * VEC. */
> + cmpq $(VEC_SIZE * 4), %rdx
> + jbe L(last_2x_vec)
>
> + /* Check third and fourth VEC no matter what. */
> + vmovdqu (VEC_SIZE * 2)(%rsi), %ymm3
> + VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm3, %ymm3
> + vpmovmskb %ymm3, %eax
> + incl %eax
> + jnz L(return_vec_2)
> vmovdqu (VEC_SIZE * 3)(%rsi), %ymm4
> - VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm4, %ymm4
> + VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm4, %ymm4
> + vpmovmskb %ymm4, %ecx
> + incl %ecx
> + jnz L(return_vec_3)
>
> - vpand %ymm1, %ymm2, %ymm5
> - vpand %ymm3, %ymm4, %ymm6
> - vpand %ymm5, %ymm6, %ymm5
> + /* Go to 4x VEC loop. */
> + cmpq $(VEC_SIZE * 8), %rdx
> + ja L(more_8x_vec)
>
> - vptest %ymm0, %ymm5
> - jnc L(4x_vec_end)
> + /* Handle remainder of size = 4 * VEC + 1 to 8 * VEC without any
> + branches. */
>
> + /* Load first two VEC from s2 before adjusting addresses. */
> + vmovdqu -(VEC_SIZE * 4)(%rsi, %rdx), %ymm1
> + vmovdqu -(VEC_SIZE * 3)(%rsi, %rdx), %ymm2
> leaq -(4 * VEC_SIZE)(%rdi, %rdx), %rdi
> leaq -(4 * VEC_SIZE)(%rsi, %rdx), %rsi
> - vmovdqu (%rsi), %ymm1
> - VPCMPEQ (%rdi), %ymm1, %ymm1
>
> - vmovdqu VEC_SIZE(%rsi), %ymm2
> - VPCMPEQ VEC_SIZE(%rdi), %ymm2, %ymm2
> - vpand %ymm2, %ymm1, %ymm5
> + /* Wait to load from s1 until addresses are adjusted, due to
> + unlamination of micro-fusion with complex addressing modes. */
> + VPCMPEQ (%rdi), %ymm1, %ymm1
> + VPCMPEQ (VEC_SIZE)(%rdi), %ymm2, %ymm2
>
> vmovdqu (VEC_SIZE * 2)(%rsi), %ymm3
> - VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm3, %ymm3
> - vpand %ymm3, %ymm5, %ymm5
> -
> + VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm3, %ymm3
> vmovdqu (VEC_SIZE * 3)(%rsi), %ymm4
> - VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm4, %ymm4
> - vpand %ymm4, %ymm5, %ymm5
> + VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm4, %ymm4
>
> - vptest %ymm0, %ymm5
> - jnc L(4x_vec_end)
> - xorl %eax, %eax
> + /* Reduce VEC1 - VEC4. */
> + vpand %ymm1, %ymm2, %ymm5
> + vpand %ymm3, %ymm4, %ymm6
> + vpand %ymm5, %ymm6, %ymm7
> + vpmovmskb %ymm7, %ecx
> + incl %ecx
> + jnz L(return_vec_0_1_2_3)
> + /* NB: eax must be zero to reach here. */
> + VZEROUPPER_RETURN
> +
> + .p2align 4
> +L(return_vec_0):
> + tzcntl %eax, %eax
> +# ifdef USE_AS_WMEMCMP
> + movl (%rdi, %rax), %ecx
> + xorl %edx, %edx
> + cmpl (%rsi, %rax), %ecx
> + /* NB: no partial register stall here because of the xorl zero
> + idiom above. */
> + setg %dl
> + leal -1(%rdx, %rdx), %eax
> +# else
> + movzbl (%rsi, %rax), %ecx
> + movzbl (%rdi, %rax), %eax
> + subl %ecx, %eax
> +# endif
> L(return_vzeroupper):
> ZERO_UPPER_VEC_REGISTERS_RETURN
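[Note: for wmemcmp the mismatching 4-byte elements must be ordered as
signed ints (see the warning near the top of the file), and the
setg/lea pair maps that comparison straight to +1/-1, replacing the old
setl/negl/orl sequence. A C equivalent, not part of the patch:]

    #include <stdint.h>

    /* Only reached when a != b.  */
    static int
    wmemcmp_ret (int32_t a, int32_t b)
    {
      int g = a > b;               /* xorl %edx, %edx; cmpl; setg %dl */
      return 2 * g - 1;            /* leal -1(%rdx, %rdx), %eax */
    }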
>
> .p2align 4
> -L(last_2x_vec):
> - /* From VEC to 2 * VEC. No branch when size == VEC_SIZE. */
> - vmovdqu (%rsi), %ymm2
> - VPCMPEQ (%rdi), %ymm2, %ymm2
> - vpmovmskb %ymm2, %eax
> - subl $VEC_MASK, %eax
> - jnz L(first_vec)
> +L(return_vec_1):
> + tzcntl %eax, %eax
> +# ifdef USE_AS_WMEMCMP
> + movl VEC_SIZE(%rdi, %rax), %ecx
> + xorl %edx, %edx
> + cmpl VEC_SIZE(%rsi, %rax), %ecx
> + setg %dl
> + leal -1(%rdx, %rdx), %eax
> +# else
> + movzbl VEC_SIZE(%rsi, %rax), %ecx
> + movzbl VEC_SIZE(%rdi, %rax), %eax
> + subl %ecx, %eax
> +# endif
> + VZEROUPPER_RETURN
> +
> + .p2align 4
> +L(return_vec_2):
> + tzcntl %eax, %eax
> +# ifdef USE_AS_WMEMCMP
> + movl (VEC_SIZE * 2)(%rdi, %rax), %ecx
> + xorl %edx, %edx
> + cmpl (VEC_SIZE * 2)(%rsi, %rax), %ecx
> + setg %dl
> + leal -1(%rdx, %rdx), %eax
> +# else
> + movzbl (VEC_SIZE * 2)(%rsi, %rax), %ecx
> + movzbl (VEC_SIZE * 2)(%rdi, %rax), %eax
> + subl %ecx, %eax
> +# endif
> + VZEROUPPER_RETURN
> +
> + /* NB: p2align 5 here to ensure 4x loop is 32 byte aligned. */
> + .p2align 5
> +L(8x_return_vec_0_1_2_3):
> + /* Returning from L(more_8x_vec) requires restoring rsi. */
> + addq %rdi, %rsi
> +L(return_vec_0_1_2_3):
> + vpmovmskb %ymm1, %eax
> + incl %eax
> + jnz L(return_vec_0)
>
> -L(last_vec):
> - /* Use overlapping loads to avoid branches. */
> - leaq -VEC_SIZE(%rdi, %rdx), %rdi
> - leaq -VEC_SIZE(%rsi, %rdx), %rsi
> - vmovdqu (%rsi), %ymm2
> - VPCMPEQ (%rdi), %ymm2, %ymm2
> vpmovmskb %ymm2, %eax
> - subl $VEC_MASK, %eax
> - jnz L(first_vec)
> + incl %eax
> + jnz L(return_vec_1)
> +
> + vpmovmskb %ymm3, %eax
> + incl %eax
> + jnz L(return_vec_2)
> +L(return_vec_3):
> + tzcntl %ecx, %ecx
> +# ifdef USE_AS_WMEMCMP
> + movl (VEC_SIZE * 3)(%rdi, %rcx), %eax
> + xorl %edx, %edx
> + cmpl (VEC_SIZE * 3)(%rsi, %rcx), %eax
> + setg %dl
> + leal -1(%rdx, %rdx), %eax
> +# else
> + movzbl (VEC_SIZE * 3)(%rdi, %rcx), %eax
> + movzbl (VEC_SIZE * 3)(%rsi, %rcx), %ecx
> + subl %ecx, %eax
> +# endif
> + VZEROUPPER_RETURN
> +
> + .p2align 4
> +L(more_8x_vec):
> + /* Set end of s1 in rdx. */
> + leaq -(VEC_SIZE * 4)(%rdi, %rdx), %rdx
> + /* rsi stores s2 - s1. This allows the loop to update only one
> + pointer. */
> + subq %rdi, %rsi
> + /* Align s1 pointer. */
> + andq $-VEC_SIZE, %rdi
> + /* Adjust because the first 4x VEC were already checked. */
> + subq $-(VEC_SIZE * 4), %rdi
> + .p2align 4
> +L(loop_4x_vec):
> + /* rsi has s2 - s1 so get correct address by adding s1 (in rdi).
> + */
> + vmovdqu (%rsi, %rdi), %ymm1
> + VPCMPEQ (%rdi), %ymm1, %ymm1
> +
> + vmovdqu VEC_SIZE(%rsi, %rdi), %ymm2
> + VPCMPEQ VEC_SIZE(%rdi), %ymm2, %ymm2
> +
> + vmovdqu (VEC_SIZE * 2)(%rsi, %rdi), %ymm3
> + VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm3, %ymm3
> +
> + vmovdqu (VEC_SIZE * 3)(%rsi, %rdi), %ymm4
> + VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm4, %ymm4
> +
> + vpand %ymm1, %ymm2, %ymm5
> + vpand %ymm3, %ymm4, %ymm6
> + vpand %ymm5, %ymm6, %ymm7
> + vpmovmskb %ymm7, %ecx
> + incl %ecx
> + jnz L(8x_return_vec_0_1_2_3)
> + subq $-(VEC_SIZE * 4), %rdi
> + /* Check if s1 pointer at end. */
> + cmpq %rdx, %rdi
> + jb L(loop_4x_vec)
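[Note: a C sketch of this setup and loop, not part of the patch. A
4 * VEC memcmp stands in for the four load/VPCMPEQ/vpand steps, and the
s2 - s1 pointer subtraction mirrors the asm rather than strictly
portable C:]

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static int
    loop_4x_sketch (const unsigned char *s1, const unsigned char *s2,
                    size_t n)                    /* n > 8 * VEC */
    {
      enum { VEC = 32 };
      const unsigned char *end = s1 + n - 4 * VEC; /* leaq -(VEC_SIZE * 4)(%rdi, %rdx), %rdx */
      ptrdiff_t delta = s2 - s1;                   /* subq %rdi, %rsi */
      /* Align s1 down, then skip the 4 VECs already checked.  */
      s1 = (const unsigned char *) (((uintptr_t) s1 & -(uintptr_t) VEC)
                                    + 4 * VEC);
      do
        {
          if (memcmp (s1, s1 + delta, 4 * VEC) != 0)
            return 1;                            /* jnz L(8x_return_vec_0_1_2_3) */
          s1 += 4 * VEC;                         /* subq $-(VEC_SIZE * 4), %rdi */
        }
      while (s1 < end);                          /* cmpq %rdx, %rdi; jb */
      return 0;                                  /* overlapping 4x tail follows */
    }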
> +
> + subq %rdx, %rdi
> + /* rdi has 4 * VEC_SIZE - remaining length. */
> + cmpl $(VEC_SIZE * 3), %edi
> + jae L(8x_last_1x_vec)
> + /* Load regardless of branch. */
> + vmovdqu (VEC_SIZE * 2)(%rsi, %rdx), %ymm3
> + cmpl $(VEC_SIZE * 2), %edi
> + jae L(8x_last_2x_vec)
> +
> + /* Check last 4 VEC. */
> + vmovdqu (%rsi, %rdx), %ymm1
> + VPCMPEQ (%rdx), %ymm1, %ymm1
> +
> + vmovdqu VEC_SIZE(%rsi, %rdx), %ymm2
> + VPCMPEQ VEC_SIZE(%rdx), %ymm2, %ymm2
> +
> + VPCMPEQ (VEC_SIZE * 2)(%rdx), %ymm3, %ymm3
> +
> + vmovdqu (VEC_SIZE * 3)(%rsi, %rdx), %ymm4
> + VPCMPEQ (VEC_SIZE * 3)(%rdx), %ymm4, %ymm4
> +
> + vpand %ymm1, %ymm2, %ymm5
> + vpand %ymm3, %ymm4, %ymm6
> + vpand %ymm5, %ymm6, %ymm7
> + vpmovmskb %ymm7, %ecx
> + /* Restore s1 pointer to rdi. */
> + movq %rdx, %rdi
> + incl %ecx
> + jnz L(8x_return_vec_0_1_2_3)
> + /* NB: eax must be zero to reach here. */
> + VZEROUPPER_RETURN
> +
> + /* Only entry is from L(more_8x_vec). */
> + .p2align 4
> +L(8x_last_2x_vec):
> + /* Check second to last VEC. rdx stores the end pointer of s1 and
> + ymm3 has already been loaded with the second to last VEC from s2.
> + */
> + VPCMPEQ (VEC_SIZE * 2)(%rdx), %ymm3, %ymm3
> + vpmovmskb %ymm3, %eax
> + incl %eax
> + jnz L(8x_return_vec_2)
> + /* Check last VEC. */
> + .p2align 4
> +L(8x_last_1x_vec):
> + vmovdqu (VEC_SIZE * 3)(%rsi, %rdx), %ymm4
> + VPCMPEQ (VEC_SIZE * 3)(%rdx), %ymm4, %ymm4
> + vpmovmskb %ymm4, %eax
> + incl %eax
> + jnz L(8x_return_vec_3)
> VZEROUPPER_RETURN
>
> .p2align 4
> -L(first_vec):
> - /* A byte or int32 is different within 16 or 32 bytes. */
> - tzcntl %eax, %ecx
> +L(last_2x_vec):
> + /* Check second to last VEC. */
> + vmovdqu -(VEC_SIZE * 2)(%rsi, %rdx), %ymm1
> + VPCMPEQ -(VEC_SIZE * 2)(%rdi, %rdx), %ymm1, %ymm1
> + vpmovmskb %ymm1, %eax
> + incl %eax
> + jnz L(return_vec_1_end)
> + /* Check last VEC. */
> +L(last_1x_vec):
> + vmovdqu -(VEC_SIZE * 1)(%rsi, %rdx), %ymm1
> + VPCMPEQ -(VEC_SIZE * 1)(%rdi, %rdx), %ymm1, %ymm1
> + vpmovmskb %ymm1, %eax
> + incl %eax
> + jnz L(return_vec_0_end)
> + VZEROUPPER_RETURN
> +
> + .p2align 4
> +L(8x_return_vec_2):
> + subq $VEC_SIZE, %rdx
> +L(8x_return_vec_3):
> + tzcntl %eax, %eax
> + addq %rdx, %rax
> # ifdef USE_AS_WMEMCMP
> - xorl %eax, %eax
> - movl (%rdi, %rcx), %edx
> - cmpl (%rsi, %rcx), %edx
> -L(wmemcmp_return):
> - setl %al
> - negl %eax
> - orl $1, %eax
> + movl (VEC_SIZE * 3)(%rax), %ecx
> + xorl %edx, %edx
> + cmpl (VEC_SIZE * 3)(%rsi, %rax), %ecx
> + setg %dl
> + leal -1(%rdx, %rdx), %eax
> # else
> - movzbl (%rdi, %rcx), %eax
> - movzbl (%rsi, %rcx), %edx
> - sub %edx, %eax
> + movzbl (VEC_SIZE * 3)(%rsi, %rax), %ecx
> + movzbl (VEC_SIZE * 3)(%rax), %eax
> + subl %ecx, %eax
> # endif
> VZEROUPPER_RETURN
>
> -# ifdef USE_AS_WMEMCMP
> .p2align 4
> -L(4):
> - xorl %eax, %eax
> - movl (%rdi), %edx
> - cmpl (%rsi), %edx
> - jne L(wmemcmp_return)
> - ret
> +L(return_vec_1_end):
> + tzcntl %eax, %eax
> + addl %edx, %eax
> +# ifdef USE_AS_WMEMCMP
> + movl -(VEC_SIZE * 2)(%rdi, %rax), %ecx
> + xorl %edx, %edx
> + cmpl -(VEC_SIZE * 2)(%rsi, %rax), %ecx
> + setg %dl
> + leal -1(%rdx, %rdx), %eax
> # else
> + movzbl -(VEC_SIZE * 2)(%rsi, %rax), %ecx
> + movzbl -(VEC_SIZE * 2)(%rdi, %rax), %eax
> + subl %ecx, %eax
> +# endif
> + VZEROUPPER_RETURN
> +
> .p2align 4
> -L(between_4_7):
> - /* Load as big endian with overlapping movbe to avoid branches. */
> - movbe (%rdi), %eax
> - movbe (%rsi), %ecx
> - shlq $32, %rax
> - shlq $32, %rcx
> - movbe -4(%rdi, %rdx), %edi
> - movbe -4(%rsi, %rdx), %esi
> - orq %rdi, %rax
> - orq %rsi, %rcx
> - subq %rcx, %rax
> - je L(exit)
> - sbbl %eax, %eax
> - orl $1, %eax
> - ret
> +L(return_vec_0_end):
> + tzcntl %eax, %eax
> + addl %edx, %eax
> +# ifdef USE_AS_WMEMCMP
> + movl -VEC_SIZE(%rdi, %rax), %ecx
> + xorl %edx, %edx
> + cmpl -VEC_SIZE(%rsi, %rax), %ecx
> + setg %dl
> + leal -1(%rdx, %rdx), %eax
> +# else
> + movzbl -VEC_SIZE(%rsi, %rax), %ecx
> + movzbl -VEC_SIZE(%rdi, %rax), %eax
> + subl %ecx, %eax
> +# endif
> + VZEROUPPER_RETURN
>
> .p2align 4
> -L(exit):
> - ret
> +L(less_vec):
> + /* Check if one or less CHAR. This is necessary for size = 0 but
> + is also faster for size = CHAR_SIZE. */
> + cmpl $CHAR_SIZE, %edx
> + jbe L(one_or_less)
> +
> + /* Check if loading one VEC from either s1 or s2 could cause a
> + page cross. This can have false positives but is by far the
> + fastest method. */
> + movl %edi, %eax
> + orl %esi, %eax
> + andl $(PAGE_SIZE - 1), %eax
> + cmpl $(PAGE_SIZE - VEC_SIZE), %eax
> + jg L(page_cross_less_vec)
> +
> + /* No page cross possible. */
> + vmovdqu (%rsi), %ymm2
> + VPCMPEQ (%rdi), %ymm2, %ymm2
> + vpmovmskb %ymm2, %eax
> + incl %eax
> + /* Result will be zero if s1 and s2 match. Otherwise first set
> + bit will be first mismatch. */
> + bzhil %edx, %eax, %edx
> + jnz L(return_vec_0)
> + xorl %eax, %eax
> + VZEROUPPER_RETURN
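[Note: a C sketch of this path (byte variant), not part of the patch.
The bzhil result is only used for the branch: when a mismatch is in
bounds, tzcnt still runs on the unmasked incl result, whose lowest set
bit is the first unequal byte either way:]

    #include <immintrin.h>
    #include <stdint.h>
    #include <string.h>

    static int
    less_vec_sketch (const unsigned char *s1, const unsigned char *s2,
                     unsigned int n)             /* 1 < n < 32 */
    {
      enum { PAGE = 4096, VEC = 32 };
      /* Combined filter: false positives possible, false negatives
         not -- if the OR of the page offsets fits, each offset fits. */
      if ((((uintptr_t) s1 | (uintptr_t) s2) & (PAGE - 1)) > PAGE - VEC)
        return memcmp (s1, s2, n);   /* stand-in for the byte/movbe paths */
      /* Safe to load a full VEC even though n < VEC.  */
      __m256i a = _mm256_loadu_si256 ((const __m256i *) s1);
      __m256i b = _mm256_loadu_si256 ((const __m256i *) s2);
      uint32_t v =
        (uint32_t) _mm256_movemask_epi8 (_mm256_cmpeq_epi8 (a, b)) + 1;
      if ((v & ((1u << n) - 1)) == 0)  /* bzhil %edx, %eax, %edx; jnz */
        return 0;                      /* no mismatch within n bytes */
      unsigned int i = __builtin_ctz (v);  /* tzcnt in L(return_vec_0) */
      return s1[i] - s2[i];
    }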
>
> .p2align 4
> -L(between_2_3):
> +L(page_cross_less_vec):
> + /* If USE_AS_WMEMCMP it can only be 0, 4, 8, 12, 16, 20, 24, 28
> + bytes. */
> + cmpl $16, %edx
> + jae L(between_16_31)
> +# ifndef USE_AS_WMEMCMP
> + cmpl $8, %edx
> + jae L(between_8_15)
> + cmpl $4, %edx
> + jae L(between_4_7)
> +
> /* Load as big endian to avoid branches. */
> movzwl (%rdi), %eax
> movzwl (%rsi), %ecx
> @@ -208,223 +439,106 @@ L(between_2_3):
> shll $8, %ecx
> bswap %eax
> bswap %ecx
> - movb -1(%rdi, %rdx), %al
> - movb -1(%rsi, %rdx), %cl
> + movzbl -1(%rdi, %rdx), %edi
> + movzbl -1(%rsi, %rdx), %esi
> + orl %edi, %eax
> + orl %esi, %ecx
> /* Subtraction is okay because the upper 8 bits are zero. */
> subl %ecx, %eax
> + /* No ymm register was touched. */
> ret
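[Note: a C rendering of this branchless 2-3 byte path, not part of the
patch. Two fixed byte loads plus one overlapping last-byte load build a
big-endian key per string, so a single subtraction orders exactly like
memcmp; for size 2 the last byte is simply duplicated in both keys:]

    /* n is 2 or 3.  */
    static int
    between_2_3_sketch (const unsigned char *p, const unsigned char *q,
                        unsigned int n)
    {
      /* movzwl; shll $8; bswap  ==>  p[0] << 16 | p[1] << 8 */
      unsigned int a = (p[0] << 16) | (p[1] << 8);
      unsigned int b = (q[0] << 16) | (q[1] << 8);
      a |= p[n - 1];                 /* movzbl -1(%rdi, %rdx); orl */
      b |= q[n - 1];
      /* At most 24 bits used, so the subtraction cannot overflow.  */
      return (int) a - (int) b;
    }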
>
> .p2align 4
> -L(1):
> - movzbl (%rdi), %eax
> +L(one_or_less):
> + jb L(zero)
> movzbl (%rsi), %ecx
> + movzbl (%rdi), %eax
> subl %ecx, %eax
> - ret
> -# endif
> -
> - .p2align 4
> -L(zero):
> - xorl %eax, %eax
> + /* No ymm register was touched. */
> ret
>
> .p2align 4
> -L(less_vec):
> -# ifdef USE_AS_WMEMCMP
> - /* It can only be 0, 4, 8, 12, 16, 20, 24, 28 bytes. */
> - cmpb $4, %dl
> - je L(4)
> - jb L(zero)
> -# else
> - cmpb $1, %dl
> - je L(1)
> - jb L(zero)
> - cmpb $4, %dl
> - jb L(between_2_3)
> - cmpb $8, %dl
> - jb L(between_4_7)
> +L(between_8_15):
> # endif
> - cmpb $16, %dl
> - jae L(between_16_31)
> - /* It is between 8 and 15 bytes. */
> + /* If USE_AS_WMEMCMP fall through into 8-15 byte case. */
> vmovq (%rdi), %xmm1
> vmovq (%rsi), %xmm2
> - VPCMPEQ %xmm1, %xmm2, %xmm2
> + VPCMPEQ %xmm1, %xmm2, %xmm2
> vpmovmskb %xmm2, %eax
> - subl $0xffff, %eax
> - jnz L(first_vec)
> + subl $0xffff, %eax
> + jnz L(return_vec_0)
> /* Use overlapping loads to avoid branches. */
> leaq -8(%rdi, %rdx), %rdi
> leaq -8(%rsi, %rdx), %rsi
> vmovq (%rdi), %xmm1
> vmovq (%rsi), %xmm2
> - VPCMPEQ %xmm1, %xmm2, %xmm2
> + VPCMPEQ %xmm1, %xmm2, %xmm2
> vpmovmskb %xmm2, %eax
> - subl $0xffff, %eax
> - jnz L(first_vec)
> + subl $0xffff, %eax
> + jnz L(return_vec_0)
> + /* No ymm register was touched. */
> + ret
> +
> + .p2align 4
> +L(zero):
> + xorl %eax, %eax
> ret
>
> .p2align 4
> L(between_16_31):
> /* From 16 to 31 bytes. No branch when size == 16. */
> vmovdqu (%rsi), %xmm2
> - VPCMPEQ (%rdi), %xmm2, %xmm2
> + VPCMPEQ (%rdi), %xmm2, %xmm2
> vpmovmskb %xmm2, %eax
> - subl $0xffff, %eax
> - jnz L(first_vec)
> + subl $0xffff, %eax
> + jnz L(return_vec_0)
>
> /* Use overlapping loads to avoid branches. */
> +
> + vmovdqu -16(%rsi, %rdx), %xmm2
> leaq -16(%rdi, %rdx), %rdi
> leaq -16(%rsi, %rdx), %rsi
> - vmovdqu (%rsi), %xmm2
> - VPCMPEQ (%rdi), %xmm2, %xmm2
> + VPCMPEQ (%rdi), %xmm2, %xmm2
> vpmovmskb %xmm2, %eax
> - subl $0xffff, %eax
> - jnz L(first_vec)
> + subl $0xffff, %eax
> + jnz L(return_vec_0)
> + /* No ymm register was touched. */
> ret
>
> - .p2align 4
> -L(more_8x_vec):
> - /* More than 8 * VEC. Check the first VEC. */
> - vmovdqu (%rsi), %ymm2
> - VPCMPEQ (%rdi), %ymm2, %ymm2
> - vpmovmskb %ymm2, %eax
> - subl $VEC_MASK, %eax
> - jnz L(first_vec)
> -
> - /* Align the first memory area for aligned loads in the loop.
> - Compute how much the first memory area is misaligned. */
> - movq %rdi, %rcx
> - andl $(VEC_SIZE - 1), %ecx
> - /* Get the negative of offset for alignment. */
> - subq $VEC_SIZE, %rcx
> - /* Adjust the second memory area. */
> - subq %rcx, %rsi
> - /* Adjust the first memory area which should be aligned now. */
> - subq %rcx, %rdi
> - /* Adjust length. */
> - addq %rcx, %rdx
> -
> -L(loop_4x_vec):
> - /* Compare 4 * VEC at a time forward. */
> - vmovdqu (%rsi), %ymm1
> - VPCMPEQ (%rdi), %ymm1, %ymm1
> -
> - vmovdqu VEC_SIZE(%rsi), %ymm2
> - VPCMPEQ VEC_SIZE(%rdi), %ymm2, %ymm2
> - vpand %ymm2, %ymm1, %ymm5
> -
> - vmovdqu (VEC_SIZE * 2)(%rsi), %ymm3
> - VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm3, %ymm3
> - vpand %ymm3, %ymm5, %ymm5
> -
> - vmovdqu (VEC_SIZE * 3)(%rsi), %ymm4
> - VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm4, %ymm4
> - vpand %ymm4, %ymm5, %ymm5
> -
> - vptest %ymm0, %ymm5
> - jnc L(4x_vec_end)
> -
> - addq $(VEC_SIZE * 4), %rdi
> - addq $(VEC_SIZE * 4), %rsi
> -
> - subq $(VEC_SIZE * 4), %rdx
> - cmpq $(VEC_SIZE * 4), %rdx
> - jae L(loop_4x_vec)
> -
> - /* Less than 4 * VEC. */
> - cmpq $VEC_SIZE, %rdx
> - jbe L(last_vec)
> - cmpq $(VEC_SIZE * 2), %rdx
> - jbe L(last_2x_vec)
> -
> -L(last_4x_vec):
> - /* From 2 * VEC to 4 * VEC. */
> - vmovdqu (%rsi), %ymm2
> - VPCMPEQ (%rdi), %ymm2, %ymm2
> - vpmovmskb %ymm2, %eax
> - subl $VEC_MASK, %eax
> - jnz L(first_vec)
> -
> - addq $VEC_SIZE, %rdi
> - addq $VEC_SIZE, %rsi
> - vmovdqu (%rsi), %ymm2
> - VPCMPEQ (%rdi), %ymm2, %ymm2
> - vpmovmskb %ymm2, %eax
> - subl $VEC_MASK, %eax
> - jnz L(first_vec)
> -
> - /* Use overlapping loads to avoid branches. */
> - leaq -(3 * VEC_SIZE)(%rdi, %rdx), %rdi
> - leaq -(3 * VEC_SIZE)(%rsi, %rdx), %rsi
> - vmovdqu (%rsi), %ymm2
> - VPCMPEQ (%rdi), %ymm2, %ymm2
> - vpmovmskb %ymm2, %eax
> - subl $VEC_MASK, %eax
> - jnz L(first_vec)
> -
> - addq $VEC_SIZE, %rdi
> - addq $VEC_SIZE, %rsi
> - vmovdqu (%rsi), %ymm2
> - VPCMPEQ (%rdi), %ymm2, %ymm2
> - vpmovmskb %ymm2, %eax
> - subl $VEC_MASK, %eax
> - jnz L(first_vec)
> - VZEROUPPER_RETURN
> -
> - .p2align 4
> -L(4x_vec_end):
> - vpmovmskb %ymm1, %eax
> - subl $VEC_MASK, %eax
> - jnz L(first_vec)
> - vpmovmskb %ymm2, %eax
> - subl $VEC_MASK, %eax
> - jnz L(first_vec_x1)
> - vpmovmskb %ymm3, %eax
> - subl $VEC_MASK, %eax
> - jnz L(first_vec_x2)
> - vpmovmskb %ymm4, %eax
> - subl $VEC_MASK, %eax
> - tzcntl %eax, %ecx
> # ifdef USE_AS_WMEMCMP
> - xorl %eax, %eax
> - movl (VEC_SIZE * 3)(%rdi, %rcx), %edx
> - cmpl (VEC_SIZE * 3)(%rsi, %rcx), %edx
> - jmp L(wmemcmp_return)
> -# else
> - movzbl (VEC_SIZE * 3)(%rdi, %rcx), %eax
> - movzbl (VEC_SIZE * 3)(%rsi, %rcx), %edx
> - sub %edx, %eax
> -# endif
> - VZEROUPPER_RETURN
> -
> .p2align 4
> -L(first_vec_x1):
> - tzcntl %eax, %ecx
> -# ifdef USE_AS_WMEMCMP
> - xorl %eax, %eax
> - movl VEC_SIZE(%rdi, %rcx), %edx
> - cmpl VEC_SIZE(%rsi, %rcx), %edx
> - jmp L(wmemcmp_return)
> +L(one_or_less):
> + jb L(zero)
> + movl (%rdi), %ecx
> + xorl %edx, %edx
> + cmpl (%rsi), %ecx
> + je L(zero)
> + setg %dl
> + leal -1(%rdx, %rdx), %eax
> + /* No ymm register was touched. */
> + ret
> # else
> - movzbl VEC_SIZE(%rdi, %rcx), %eax
> - movzbl VEC_SIZE(%rsi, %rcx), %edx
> - sub %edx, %eax
> -# endif
> - VZEROUPPER_RETURN
>
> .p2align 4
> -L(first_vec_x2):
> - tzcntl %eax, %ecx
> -# ifdef USE_AS_WMEMCMP
> - xorl %eax, %eax
> - movl (VEC_SIZE * 2)(%rdi, %rcx), %edx
> - cmpl (VEC_SIZE * 2)(%rsi, %rcx), %edx
> - jmp L(wmemcmp_return)
> -# else
> - movzbl (VEC_SIZE * 2)(%rdi, %rcx), %eax
> - movzbl (VEC_SIZE * 2)(%rsi, %rcx), %edx
> - sub %edx, %eax
> +L(between_4_7):
> + /* Load as big endian with overlapping movbe to avoid branches.
> + */
> + movbe (%rdi), %eax
> + movbe (%rsi), %ecx
> + shlq $32, %rax
> + shlq $32, %rcx
> + movbe -4(%rdi, %rdx), %edi
> + movbe -4(%rsi, %rdx), %esi
> + orq %rdi, %rax
> + orq %rsi, %rcx
> + subq %rcx, %rax
> + jz L(zero_4_7)
> + sbbl %eax, %eax
> + orl $1, %eax
> +L(zero_4_7):
> + /* No ymm register was touched. */
> + ret
> # endif
> - VZEROUPPER_RETURN
> +
> END (MEMCMP)
> #endif
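[Note: a C rendering of L(between_4_7) above, not part of the patch.
Two overlapping big-endian dwords per string form one 64-bit key; the
leading bytes dominate the comparison, so the overlap can never flip
the result, and the sbbl/orl pair collapses the borrow into -1/+1:]

    #include <stdint.h>

    static uint32_t
    load_be32 (const unsigned char *p)   /* movbe 32-bit load */
    {
      return ((uint32_t) p[0] << 24) | ((uint32_t) p[1] << 16)
             | ((uint32_t) p[2] << 8) | p[3];
    }

    /* 4 <= n <= 7.  */
    static int
    between_4_7_sketch (const unsigned char *p, const unsigned char *q,
                        unsigned int n)
    {
      uint64_t a = ((uint64_t) load_be32 (p) << 32) | load_be32 (p + n - 4);
      uint64_t b = ((uint64_t) load_be32 (q) << 32) | load_be32 (q + n - 4);
      if (a == b)                        /* subq; jz L(zero_4_7) */
        return 0;
      return a > b ? 1 : -1;             /* sbbl %eax, %eax; orl $1 */
    }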
> --
> 2.29.2
>
H.J.
On Tue, May 18, 2021 at 9:01 PM H.J. Lu <hjl.tools@gmail.com> wrote:
>
> On Mon, May 17, 2021 at 02:44:07PM -0400, Noah Goldstein wrote:
> > No bug. This commit optimizes memcmp-avx2-movbe.S. The optimizations include
> > adding a new vec compare path for small sizes, reorganizing the entry
> > control flow, and removing some unnecessary ALU instructions from the
> > main loop. test-memcmp and test-wmemcmp are both passing.
> >
> > Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
> > ---
> > sysdeps/x86_64/multiarch/ifunc-memcmp.h | 1 +
> > sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S | 676 +++++++++++--------
> > 2 files changed, 396 insertions(+), 281 deletions(-)
> >
> > diff --git a/sysdeps/x86_64/multiarch/ifunc-memcmp.h b/sysdeps/x86_64/multiarch/ifunc-memcmp.h
> > index 8bee1aff75..89e2129968 100644
> > --- a/sysdeps/x86_64/multiarch/ifunc-memcmp.h
> > +++ b/sysdeps/x86_64/multiarch/ifunc-memcmp.h
> > @@ -33,6 +33,7 @@ IFUNC_SELECTOR (void)
> >
> > if (CPU_FEATURE_USABLE_P (cpu_features, AVX2)
> > && CPU_FEATURE_USABLE_P (cpu_features, MOVBE)
> > + && CPU_FEATURE_USABLE_P (cpu_features, BMI2)
>
> Please also update ifunc-impl-list.c to require BMI2 for AVX2/EVEX
> memcmp.
Done.
@@ -33,6 +33,7 @@ IFUNC_SELECTOR (void)
if (CPU_FEATURE_USABLE_P (cpu_features, AVX2)
&& CPU_FEATURE_USABLE_P (cpu_features, MOVBE)
+ && CPU_FEATURE_USABLE_P (cpu_features, BMI2)
&& CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
{
if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
@@ -19,17 +19,23 @@
#if IS_IN (libc)
/* memcmp/wmemcmp is implemented as:
- 1. For size from 2 to 7 bytes, load as big endian with movbe and bswap
- to avoid branches.
- 2. Use overlapping compare to avoid branch.
- 3. Use vector compare when size >= 4 bytes for memcmp or size >= 8
- bytes for wmemcmp.
- 4. If size is 8 * VEC_SIZE or less, unroll the loop.
- 5. Compare 4 * VEC_SIZE at a time with the aligned first memory
+ 1. Use ymm vector compares when possible. The only case where
+ vector compares is not possible for when size < VEC_SIZE
+ and loading from either s1 or s2 would cause a page cross.
+ 2. For size from 2 to 7 bytes on page cross, load as big endian
+ with movbe and bswap to avoid branches.
+ 3. Use xmm vector compare when size >= 4 bytes for memcmp or
+ size >= 8 bytes for wmemcmp.
+ 4. Optimistically compare up to first 4 * VEC_SIZE one at a
+ to check for early mismatches. Only do this if its guranteed the
+ work is not wasted.
+ 5. If size is 8 * VEC_SIZE or less, unroll the loop.
+ 6. Compare 4 * VEC_SIZE at a time with the aligned first memory
area.
- 6. Use 2 vector compares when size is 2 * VEC_SIZE or less.
- 7. Use 4 vector compares when size is 4 * VEC_SIZE or less.
- 8. Use 8 vector compares when size is 8 * VEC_SIZE or less. */
+ 7. Use 2 vector compares when size is 2 * VEC_SIZE or less.
+ 8. Use 4 vector compares when size is 4 * VEC_SIZE or less.
+ 9. Use 8 vector compares when size is 8 * VEC_SIZE or less. */
+
# include <sysdep.h>
@@ -38,8 +44,10 @@
# endif
# ifdef USE_AS_WMEMCMP
+# define CHAR_SIZE 4
# define VPCMPEQ vpcmpeqd
# else
+# define CHAR_SIZE 1
# define VPCMPEQ vpcmpeqb
# endif
@@ -52,7 +60,7 @@
# endif
# define VEC_SIZE 32
-# define VEC_MASK ((1 << VEC_SIZE) - 1)
+# define PAGE_SIZE 4096
/* Warning!
wmemcmp has to use SIGNED comparison for elements.
@@ -71,136 +79,359 @@ ENTRY (MEMCMP)
jb L(less_vec)
/* From VEC to 2 * VEC. No branch when size == VEC_SIZE. */
- vmovdqu (%rsi), %ymm2
- VPCMPEQ (%rdi), %ymm2, %ymm2
- vpmovmskb %ymm2, %eax
- subl $VEC_MASK, %eax
- jnz L(first_vec)
+ vmovdqu (%rsi), %ymm1
+ VPCMPEQ (%rdi), %ymm1, %ymm1
+ vpmovmskb %ymm1, %eax
+ /* NB: eax must be destination register if going to
+ L(return_vec_[0,2]). For L(return_vec_3 destination register
+ must be ecx. */
+ incl %eax
+ jnz L(return_vec_0)
cmpq $(VEC_SIZE * 2), %rdx
- jbe L(last_vec)
-
- VPCMPEQ %ymm0, %ymm0, %ymm0
- /* More than 2 * VEC. */
- cmpq $(VEC_SIZE * 8), %rdx
- ja L(more_8x_vec)
- cmpq $(VEC_SIZE * 4), %rdx
- jb L(last_4x_vec)
-
- /* From 4 * VEC to 8 * VEC, inclusively. */
- vmovdqu (%rsi), %ymm1
- VPCMPEQ (%rdi), %ymm1, %ymm1
+ jbe L(last_1x_vec)
+ /* Check second VEC no matter what. */
vmovdqu VEC_SIZE(%rsi), %ymm2
- VPCMPEQ VEC_SIZE(%rdi), %ymm2, %ymm2
+ VPCMPEQ VEC_SIZE(%rdi), %ymm2, %ymm2
+ vpmovmskb %ymm2, %eax
+ /* If all 4 VEC where equal eax will be all 1s so incl will
+ overflow and set zero flag. */
+ incl %eax
+ jnz L(return_vec_1)
- vmovdqu (VEC_SIZE * 2)(%rsi), %ymm3
- VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm3, %ymm3
+ /* Less than 4 * VEC. */
+ cmpq $(VEC_SIZE * 4), %rdx
+ jbe L(last_2x_vec)
+ /* Check third and fourth VEC no matter what. */
+ vmovdqu (VEC_SIZE * 2)(%rsi), %ymm3
+ VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm3, %ymm3
+ vpmovmskb %ymm3, %eax
+ incl %eax
+ jnz L(return_vec_2)
vmovdqu (VEC_SIZE * 3)(%rsi), %ymm4
- VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm4, %ymm4
+ VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm4, %ymm4
+ vpmovmskb %ymm4, %ecx
+ incl %ecx
+ jnz L(return_vec_3)
- vpand %ymm1, %ymm2, %ymm5
- vpand %ymm3, %ymm4, %ymm6
- vpand %ymm5, %ymm6, %ymm5
+ /* Go to 4x VEC loop. */
+ cmpq $(VEC_SIZE * 8), %rdx
+ ja L(more_8x_vec)
- vptest %ymm0, %ymm5
- jnc L(4x_vec_end)
+ /* Handle remainder of size = 4 * VEC + 1 to 8 * VEC without any
+ branches. */
+ /* Load first two VEC from s2 before adjusting addresses. */
+ vmovdqu -(VEC_SIZE * 4)(%rsi, %rdx), %ymm1
+ vmovdqu -(VEC_SIZE * 3)(%rsi, %rdx), %ymm2
leaq -(4 * VEC_SIZE)(%rdi, %rdx), %rdi
leaq -(4 * VEC_SIZE)(%rsi, %rdx), %rsi
- vmovdqu (%rsi), %ymm1
- VPCMPEQ (%rdi), %ymm1, %ymm1
- vmovdqu VEC_SIZE(%rsi), %ymm2
- VPCMPEQ VEC_SIZE(%rdi), %ymm2, %ymm2
- vpand %ymm2, %ymm1, %ymm5
+ /* Wait to load from s1 until addressed adjust due to
+ unlamination of microfusion with complex address mode. */
+ VPCMPEQ (%rdi), %ymm1, %ymm1
+ VPCMPEQ (VEC_SIZE)(%rdi), %ymm2, %ymm2
vmovdqu (VEC_SIZE * 2)(%rsi), %ymm3
- VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm3, %ymm3
- vpand %ymm3, %ymm5, %ymm5
-
+ VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm3, %ymm3
vmovdqu (VEC_SIZE * 3)(%rsi), %ymm4
- VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm4, %ymm4
- vpand %ymm4, %ymm5, %ymm5
+ VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm4, %ymm4
- vptest %ymm0, %ymm5
- jnc L(4x_vec_end)
- xorl %eax, %eax
+ /* Reduce VEC0 - VEC4. */
+ vpand %ymm1, %ymm2, %ymm5
+ vpand %ymm3, %ymm4, %ymm6
+ vpand %ymm5, %ymm6, %ymm7
+ vpmovmskb %ymm7, %ecx
+ incl %ecx
+ jnz L(return_vec_0_1_2_3)
+ /* NB: eax must be zero to reach here. */
+ VZEROUPPER_RETURN
+
+ .p2align 4
+L(return_vec_0):
+ tzcntl %eax, %eax
+# ifdef USE_AS_WMEMCMP
+ movl (%rdi, %rax), %ecx
+ xorl %edx, %edx
+ cmpl (%rsi, %rax), %ecx
+ /* NB: no partial register stall here because xorl zero idiom
+ above. */
+ setg %dl
+ leal -1(%rdx, %rdx), %eax
+# else
+ movzbl (%rsi, %rax), %ecx
+ movzbl (%rdi, %rax), %eax
+ subl %ecx, %eax
+# endif
L(return_vzeroupper):
ZERO_UPPER_VEC_REGISTERS_RETURN
.p2align 4
-L(last_2x_vec):
- /* From VEC to 2 * VEC. No branch when size == VEC_SIZE. */
- vmovdqu (%rsi), %ymm2
- VPCMPEQ (%rdi), %ymm2, %ymm2
- vpmovmskb %ymm2, %eax
- subl $VEC_MASK, %eax
- jnz L(first_vec)
+L(return_vec_1):
+ tzcntl %eax, %eax
+# ifdef USE_AS_WMEMCMP
+ movl VEC_SIZE(%rdi, %rax), %ecx
+ xorl %edx, %edx
+ cmpl VEC_SIZE(%rsi, %rax), %ecx
+ setg %dl
+ leal -1(%rdx, %rdx), %eax
+# else
+ movzbl VEC_SIZE(%rsi, %rax), %ecx
+ movzbl VEC_SIZE(%rdi, %rax), %eax
+ subl %ecx, %eax
+# endif
+ VZEROUPPER_RETURN
+
+ .p2align 4
+L(return_vec_2):
+ tzcntl %eax, %eax
+# ifdef USE_AS_WMEMCMP
+ movl (VEC_SIZE * 2)(%rdi, %rax), %ecx
+ xorl %edx, %edx
+ cmpl (VEC_SIZE * 2)(%rsi, %rax), %ecx
+ setg %dl
+ leal -1(%rdx, %rdx), %eax
+# else
+ movzbl (VEC_SIZE * 2)(%rsi, %rax), %ecx
+ movzbl (VEC_SIZE * 2)(%rdi, %rax), %eax
+ subl %ecx, %eax
+# endif
+ VZEROUPPER_RETURN
+
+ /* NB: p2align 5 here to ensure 4x loop is 32 byte aligned. */
+ .p2align 5
+L(8x_return_vec_0_1_2_3):
+ /* Returning from L(more_8x_vec) requires restoring rsi. */
+ addq %rdi, %rsi
+L(return_vec_0_1_2_3):
+ vpmovmskb %ymm1, %eax
+ incl %eax
+ jnz L(return_vec_0)
-L(last_vec):
- /* Use overlapping loads to avoid branches. */
- leaq -VEC_SIZE(%rdi, %rdx), %rdi
- leaq -VEC_SIZE(%rsi, %rdx), %rsi
- vmovdqu (%rsi), %ymm2
- VPCMPEQ (%rdi), %ymm2, %ymm2
vpmovmskb %ymm2, %eax
- subl $VEC_MASK, %eax
- jnz L(first_vec)
+ incl %eax
+ jnz L(return_vec_1)
+
+ vpmovmskb %ymm3, %eax
+ incl %eax
+ jnz L(return_vec_2)
+L(return_vec_3):
+ tzcntl %ecx, %ecx
+# ifdef USE_AS_WMEMCMP
+ movl (VEC_SIZE * 3)(%rdi, %rcx), %eax
+ xorl %edx, %edx
+ cmpl (VEC_SIZE * 3)(%rsi, %rcx), %eax
+ setg %dl
+ leal -1(%rdx, %rdx), %eax
+# else
+ movzbl (VEC_SIZE * 3)(%rdi, %rcx), %eax
+ movzbl (VEC_SIZE * 3)(%rsi, %rcx), %ecx
+ subl %ecx, %eax
+# endif
+ VZEROUPPER_RETURN
+
+ .p2align 4
+L(more_8x_vec):
+ /* Set end of s1 in rdx. */
+ leaq -(VEC_SIZE * 4)(%rdi, %rdx), %rdx
+ /* rsi stores s2 - s1. This allows loop to only update one
+ pointer. */
+ subq %rdi, %rsi
+ /* Align s1 pointer. */
+ andq $-VEC_SIZE, %rdi
+ /* Adjust because first 4x vec where check already. */
+ subq $-(VEC_SIZE * 4), %rdi
+ .p2align 4
+L(loop_4x_vec):
+ /* rsi has s2 - s1 so get correct address by adding s1 (in rdi).
+ */
+ vmovdqu (%rsi, %rdi), %ymm1
+ VPCMPEQ (%rdi), %ymm1, %ymm1
+
+ vmovdqu VEC_SIZE(%rsi, %rdi), %ymm2
+ VPCMPEQ VEC_SIZE(%rdi), %ymm2, %ymm2
+
+ vmovdqu (VEC_SIZE * 2)(%rsi, %rdi), %ymm3
+ VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm3, %ymm3
+
+ vmovdqu (VEC_SIZE * 3)(%rsi, %rdi), %ymm4
+ VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm4, %ymm4
+
+ vpand %ymm1, %ymm2, %ymm5
+ vpand %ymm3, %ymm4, %ymm6
+ vpand %ymm5, %ymm6, %ymm7
+ vpmovmskb %ymm7, %ecx
+ incl %ecx
+ jnz L(8x_return_vec_0_1_2_3)
+ subq $-(VEC_SIZE * 4), %rdi
+ /* Check if s1 pointer at end. */
+ cmpq %rdx, %rdi
+ jb L(loop_4x_vec)
+
+ subq %rdx, %rdi
+ /* rdi has 4 * VEC_SIZE - remaining length. */
+ cmpl $(VEC_SIZE * 3), %edi
+ jae L(8x_last_1x_vec)
+ /* Load regardless of branch. */
+ vmovdqu (VEC_SIZE * 2)(%rsi, %rdx), %ymm3
+ cmpl $(VEC_SIZE * 2), %edi
+ jae L(8x_last_2x_vec)
+
+ /* Check last 4 VEC. */
+ vmovdqu (%rsi, %rdx), %ymm1
+ VPCMPEQ (%rdx), %ymm1, %ymm1
+
+ vmovdqu VEC_SIZE(%rsi, %rdx), %ymm2
+ VPCMPEQ VEC_SIZE(%rdx), %ymm2, %ymm2
+
+ VPCMPEQ (VEC_SIZE * 2)(%rdx), %ymm3, %ymm3
+
+ vmovdqu (VEC_SIZE * 3)(%rsi, %rdx), %ymm4
+ VPCMPEQ (VEC_SIZE * 3)(%rdx), %ymm4, %ymm4
+
+ vpand %ymm1, %ymm2, %ymm5
+ vpand %ymm3, %ymm4, %ymm6
+ vpand %ymm5, %ymm6, %ymm7
+ vpmovmskb %ymm7, %ecx
+ /* Restore s1 pointer to rdi. */
+ movq %rdx, %rdi
+ incl %ecx
+ jnz L(8x_return_vec_0_1_2_3)
+ /* NB: eax must be zero to reach here. */
+ VZEROUPPER_RETURN
+
+ /* Only entry is from L(more_8x_vec). */
+ .p2align 4
+L(8x_last_2x_vec):
+ /* Check second to last VEC. rdx store end pointer of s1 and
+ ymm3 has already been loaded with second to last VEC from s2.
+ */
+ VPCMPEQ (VEC_SIZE * 2)(%rdx), %ymm3, %ymm3
+ vpmovmskb %ymm3, %eax
+ incl %eax
+ jnz L(8x_return_vec_2)
+ /* Check last VEC. */
+ .p2align 4
+L(8x_last_1x_vec):
+ vmovdqu (VEC_SIZE * 3)(%rsi, %rdx), %ymm4
+ VPCMPEQ (VEC_SIZE * 3)(%rdx), %ymm4, %ymm4
+ vpmovmskb %ymm4, %eax
+ incl %eax
+ jnz L(8x_return_vec_3)
VZEROUPPER_RETURN
.p2align 4
-L(first_vec):
- /* A byte or int32 is different within 16 or 32 bytes. */
- tzcntl %eax, %ecx
+L(last_2x_vec):
+ /* Check second to last VEC. */
+ vmovdqu -(VEC_SIZE * 2)(%rsi, %rdx), %ymm1
+ VPCMPEQ -(VEC_SIZE * 2)(%rdi, %rdx), %ymm1, %ymm1
+ vpmovmskb %ymm1, %eax
+ incl %eax
+ jnz L(return_vec_1_end)
+ /* Check last VEC. */
+L(last_1x_vec):
+ vmovdqu -(VEC_SIZE * 1)(%rsi, %rdx), %ymm1
+ VPCMPEQ -(VEC_SIZE * 1)(%rdi, %rdx), %ymm1, %ymm1
+ vpmovmskb %ymm1, %eax
+ incl %eax
+ jnz L(return_vec_0_end)
+ VZEROUPPER_RETURN
+
+ .p2align 4
+L(8x_return_vec_2):
+ subq $VEC_SIZE, %rdx
+L(8x_return_vec_3):
+ tzcntl %eax, %eax
+ addq %rdx, %rax
# ifdef USE_AS_WMEMCMP
- xorl %eax, %eax
- movl (%rdi, %rcx), %edx
- cmpl (%rsi, %rcx), %edx
-L(wmemcmp_return):
- setl %al
- negl %eax
- orl $1, %eax
+ movl (VEC_SIZE * 3)(%rax), %ecx
+ xorl %edx, %edx
+ cmpl (VEC_SIZE * 3)(%rsi, %rax), %ecx
+ setg %dl
+ leal -1(%rdx, %rdx), %eax
# else
- movzbl (%rdi, %rcx), %eax
- movzbl (%rsi, %rcx), %edx
- sub %edx, %eax
+ movzbl (VEC_SIZE * 3)(%rsi, %rax), %ecx
+ movzbl (VEC_SIZE * 3)(%rax), %eax
+ subl %ecx, %eax
# endif
VZEROUPPER_RETURN
-# ifdef USE_AS_WMEMCMP
.p2align 4
-L(4):
- xorl %eax, %eax
- movl (%rdi), %edx
- cmpl (%rsi), %edx
- jne L(wmemcmp_return)
- ret
+L(return_vec_1_end):
+ tzcntl %eax, %eax
+ addl %edx, %eax
+# ifdef USE_AS_WMEMCMP
+ movl -(VEC_SIZE * 2)(%rdi, %rax), %ecx
+ xorl %edx, %edx
+ cmpl -(VEC_SIZE * 2)(%rsi, %rax), %ecx
+ setg %dl
+ leal -1(%rdx, %rdx), %eax
# else
+ movzbl -(VEC_SIZE * 2)(%rsi, %rax), %ecx
+ movzbl -(VEC_SIZE * 2)(%rdi, %rax), %eax
+ subl %ecx, %eax
+# endif
+ VZEROUPPER_RETURN
+
.p2align 4
-L(between_4_7):
- /* Load as big endian with overlapping movbe to avoid branches. */
- movbe (%rdi), %eax
- movbe (%rsi), %ecx
- shlq $32, %rax
- shlq $32, %rcx
- movbe -4(%rdi, %rdx), %edi
- movbe -4(%rsi, %rdx), %esi
- orq %rdi, %rax
- orq %rsi, %rcx
- subq %rcx, %rax
- je L(exit)
- sbbl %eax, %eax
- orl $1, %eax
- ret
+L(return_vec_0_end):
+ tzcntl %eax, %eax
+ addl %edx, %eax
+# ifdef USE_AS_WMEMCMP
+ movl -VEC_SIZE(%rdi, %rax), %ecx
+ xorl %edx, %edx
+ cmpl -VEC_SIZE(%rsi, %rax), %ecx
+ setg %dl
+ leal -1(%rdx, %rdx), %eax
+# else
+ movzbl -VEC_SIZE(%rsi, %rax), %ecx
+ movzbl -VEC_SIZE(%rdi, %rax), %eax
+ subl %ecx, %eax
+# endif
+ VZEROUPPER_RETURN
.p2align 4
-L(exit):
- ret
+L(less_vec):
+	/* Check if size is one CHAR or less.  This is necessary for
+	   size == 0, but is also faster for size == CHAR_SIZE.  */
+ cmpl $CHAR_SIZE, %edx
+ jbe L(one_or_less)
+
+ /* Check if loading one VEC from either s1 or s2 could cause a
+ page cross. This can have false positives but is by far the
+ fastest method. */
+ movl %edi, %eax
+ orl %esi, %eax
+ andl $(PAGE_SIZE - 1), %eax
+ cmpl $(PAGE_SIZE - VEC_SIZE), %eax
+ jg L(page_cross_less_vec)
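
The or/and/cmp sequence above is a one-test page-cross filter for both
pointers: the low 12 bits of (s1 | s2) are at least as large as either
pointer's page offset, so if they do not exceed PAGE_SIZE - VEC_SIZE
neither load can cross a page.  A sketch, assuming 4 KiB pages and
32-byte vectors:

    #include <stdint.h>

    static int may_cross_page (uintptr_t s1, uintptr_t s2)
    {
      /* False positives are possible (the OR can exceed both offsets),
         but a false positive only costs a slower fallback path.  */
      return ((s1 | s2) & 4095) > 4096 - 32;
    }
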
+
+ /* No page cross possible. */
+ vmovdqu (%rsi), %ymm2
+ VPCMPEQ (%rdi), %ymm2, %ymm2
+ vpmovmskb %ymm2, %eax
+ incl %eax
+	/* Result will be zero if s1 and s2 match.  Otherwise the first
+	   set bit will be the first mismatch.  */
+ bzhil %edx, %eax, %edx
+ jnz L(return_vec_0)
+ xorl %eax, %eax
+ VZEROUPPER_RETURN
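
bzhil (a BMI2 instruction) zeroes all mask bits at positions >= size, so a
mismatch beyond the requested length cannot trigger the branch.  Equivalent
C, as a sketch:

    #include <stdint.h>

    static uint32_t bits_below_size (uint32_t mask, unsigned size)
    {
      /* bzhil %edx, %eax, %edx: keep bits [0, size).  For size >= 32
         bzhi returns the source unchanged; here size < VEC_SIZE.  */
      return size >= 32 ? mask : mask & ((1u << size) - 1);
    }
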
.p2align 4
-L(between_2_3):
+L(page_cross_less_vec):
+	/* If USE_AS_WMEMCMP the size can only be 0, 4, 8, 12, 16, 20,
+	   24, or 28 bytes.  */
+ cmpl $16, %edx
+ jae L(between_16_31)
+# ifndef USE_AS_WMEMCMP
+ cmpl $8, %edx
+ jae L(between_8_15)
+ cmpl $4, %edx
+ jae L(between_4_7)
+
/* Load as big endian to avoid branches. */
movzwl (%rdi), %eax
movzwl (%rsi), %ecx
@@ -208,223 +439,106 @@ L(between_2_3):
shll $8, %ecx
bswap %eax
bswap %ecx
- movb -1(%rdi, %rdx), %al
- movb -1(%rsi, %rdx), %cl
+ movzbl -1(%rdi, %rdx), %edi
+ movzbl -1(%rsi, %rdx), %esi
+ orl %edi, %eax
+ orl %esi, %ecx
/* Subtraction is okay because the upper 8 bits are zero. */
subl %ecx, %eax
+ /* No ymm register was touched. */
ret
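
The 2-3 byte path composes a big-endian value from the first two bytes
plus the last byte (which overlaps byte 1 when size == 2), so a single
subtraction yields the ordering.  A C sketch of the same construction:

    #include <stddef.h>

    static int cmp_2_3 (const unsigned char *s1, const unsigned char *s2,
                        size_t n)   /* n is 2 or 3 */
    {
      unsigned a = (s1[0] << 16) | (s1[1] << 8) | s1[n - 1];
      unsigned b = (s2[0] << 16) | (s2[1] << 8) | s2[n - 1];
      return (int) a - (int) b;   /* upper bits are zero, so no overflow */
    }
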
.p2align 4
-L(1):
- movzbl (%rdi), %eax
+L(one_or_less):
+ jb L(zero)
movzbl (%rsi), %ecx
+ movzbl (%rdi), %eax
subl %ecx, %eax
- ret
-# endif
-
- .p2align 4
-L(zero):
- xorl %eax, %eax
+ /* No ymm register was touched. */
ret
.p2align 4
-L(less_vec):
-# ifdef USE_AS_WMEMCMP
- /* It can only be 0, 4, 8, 12, 16, 20, 24, 28 bytes. */
- cmpb $4, %dl
- je L(4)
- jb L(zero)
-# else
- cmpb $1, %dl
- je L(1)
- jb L(zero)
- cmpb $4, %dl
- jb L(between_2_3)
- cmpb $8, %dl
- jb L(between_4_7)
+L(between_8_15):
# endif
- cmpb $16, %dl
- jae L(between_16_31)
- /* It is between 8 and 15 bytes. */
+	/* If USE_AS_WMEMCMP, fall through into the 8-15 byte case.  */
vmovq (%rdi), %xmm1
vmovq (%rsi), %xmm2
- VPCMPEQ %xmm1, %xmm2, %xmm2
+ VPCMPEQ %xmm1, %xmm2, %xmm2
vpmovmskb %xmm2, %eax
- subl $0xffff, %eax
- jnz L(first_vec)
+ subl $0xffff, %eax
+ jnz L(return_vec_0)
/* Use overlapping loads to avoid branches. */
leaq -8(%rdi, %rdx), %rdi
leaq -8(%rsi, %rdx), %rsi
vmovq (%rdi), %xmm1
vmovq (%rsi), %xmm2
- VPCMPEQ %xmm1, %xmm2, %xmm2
+ VPCMPEQ %xmm1, %xmm2, %xmm2
vpmovmskb %xmm2, %eax
- subl $0xffff, %eax
- jnz L(first_vec)
+ subl $0xffff, %eax
+ jnz L(return_vec_0)
+ /* No ymm register was touched. */
+ ret
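
The 8-15 byte case relies on overlapping windows: compare the first 8
bytes, then the last 8 bytes; together the two windows cover every byte
with no length-dependent branch.  A sketch of the equality check, with
memcpy standing in for the unaligned vmovq loads:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    static int windows_equal (const unsigned char *s1,
                              const unsigned char *s2, size_t n)
    {
      /* 8 <= n <= 15, so the windows overlap but cover [0, n).  */
      uint64_t a, b, c, d;
      memcpy (&a, s1, 8);          memcpy (&b, s2, 8);
      memcpy (&c, s1 + n - 8, 8);  memcpy (&d, s2 + n - 8, 8);
      return a == b && c == d;
    }

The 16-31 byte case below applies the same idea with 16-byte xmm windows.
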
+
+ .p2align 4
+L(zero):
+ xorl %eax, %eax
ret
.p2align 4
L(between_16_31):
/* From 16 to 31 bytes. No branch when size == 16. */
vmovdqu (%rsi), %xmm2
- VPCMPEQ (%rdi), %xmm2, %xmm2
+ VPCMPEQ (%rdi), %xmm2, %xmm2
vpmovmskb %xmm2, %eax
- subl $0xffff, %eax
- jnz L(first_vec)
+ subl $0xffff, %eax
+ jnz L(return_vec_0)
/* Use overlapping loads to avoid branches. */
+
+ vmovdqu -16(%rsi, %rdx), %xmm2
leaq -16(%rdi, %rdx), %rdi
leaq -16(%rsi, %rdx), %rsi
- vmovdqu (%rsi), %xmm2
- VPCMPEQ (%rdi), %xmm2, %xmm2
+ VPCMPEQ (%rdi), %xmm2, %xmm2
vpmovmskb %xmm2, %eax
- subl $0xffff, %eax
- jnz L(first_vec)
+ subl $0xffff, %eax
+ jnz L(return_vec_0)
+ /* No ymm register was touched. */
ret
- .p2align 4
-L(more_8x_vec):
- /* More than 8 * VEC. Check the first VEC. */
- vmovdqu (%rsi), %ymm2
- VPCMPEQ (%rdi), %ymm2, %ymm2
- vpmovmskb %ymm2, %eax
- subl $VEC_MASK, %eax
- jnz L(first_vec)
-
- /* Align the first memory area for aligned loads in the loop.
- Compute how much the first memory area is misaligned. */
- movq %rdi, %rcx
- andl $(VEC_SIZE - 1), %ecx
- /* Get the negative of offset for alignment. */
- subq $VEC_SIZE, %rcx
- /* Adjust the second memory area. */
- subq %rcx, %rsi
- /* Adjust the first memory area which should be aligned now. */
- subq %rcx, %rdi
- /* Adjust length. */
- addq %rcx, %rdx
-
-L(loop_4x_vec):
- /* Compare 4 * VEC at a time forward. */
- vmovdqu (%rsi), %ymm1
- VPCMPEQ (%rdi), %ymm1, %ymm1
-
- vmovdqu VEC_SIZE(%rsi), %ymm2
- VPCMPEQ VEC_SIZE(%rdi), %ymm2, %ymm2
- vpand %ymm2, %ymm1, %ymm5
-
- vmovdqu (VEC_SIZE * 2)(%rsi), %ymm3
- VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm3, %ymm3
- vpand %ymm3, %ymm5, %ymm5
-
- vmovdqu (VEC_SIZE * 3)(%rsi), %ymm4
- VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm4, %ymm4
- vpand %ymm4, %ymm5, %ymm5
-
- vptest %ymm0, %ymm5
- jnc L(4x_vec_end)
-
- addq $(VEC_SIZE * 4), %rdi
- addq $(VEC_SIZE * 4), %rsi
-
- subq $(VEC_SIZE * 4), %rdx
- cmpq $(VEC_SIZE * 4), %rdx
- jae L(loop_4x_vec)
-
- /* Less than 4 * VEC. */
- cmpq $VEC_SIZE, %rdx
- jbe L(last_vec)
- cmpq $(VEC_SIZE * 2), %rdx
- jbe L(last_2x_vec)
-
-L(last_4x_vec):
- /* From 2 * VEC to 4 * VEC. */
- vmovdqu (%rsi), %ymm2
- VPCMPEQ (%rdi), %ymm2, %ymm2
- vpmovmskb %ymm2, %eax
- subl $VEC_MASK, %eax
- jnz L(first_vec)
-
- addq $VEC_SIZE, %rdi
- addq $VEC_SIZE, %rsi
- vmovdqu (%rsi), %ymm2
- VPCMPEQ (%rdi), %ymm2, %ymm2
- vpmovmskb %ymm2, %eax
- subl $VEC_MASK, %eax
- jnz L(first_vec)
-
- /* Use overlapping loads to avoid branches. */
- leaq -(3 * VEC_SIZE)(%rdi, %rdx), %rdi
- leaq -(3 * VEC_SIZE)(%rsi, %rdx), %rsi
- vmovdqu (%rsi), %ymm2
- VPCMPEQ (%rdi), %ymm2, %ymm2
- vpmovmskb %ymm2, %eax
- subl $VEC_MASK, %eax
- jnz L(first_vec)
-
- addq $VEC_SIZE, %rdi
- addq $VEC_SIZE, %rsi
- vmovdqu (%rsi), %ymm2
- VPCMPEQ (%rdi), %ymm2, %ymm2
- vpmovmskb %ymm2, %eax
- subl $VEC_MASK, %eax
- jnz L(first_vec)
- VZEROUPPER_RETURN
-
- .p2align 4
-L(4x_vec_end):
- vpmovmskb %ymm1, %eax
- subl $VEC_MASK, %eax
- jnz L(first_vec)
- vpmovmskb %ymm2, %eax
- subl $VEC_MASK, %eax
- jnz L(first_vec_x1)
- vpmovmskb %ymm3, %eax
- subl $VEC_MASK, %eax
- jnz L(first_vec_x2)
- vpmovmskb %ymm4, %eax
- subl $VEC_MASK, %eax
- tzcntl %eax, %ecx
# ifdef USE_AS_WMEMCMP
- xorl %eax, %eax
- movl (VEC_SIZE * 3)(%rdi, %rcx), %edx
- cmpl (VEC_SIZE * 3)(%rsi, %rcx), %edx
- jmp L(wmemcmp_return)
-# else
- movzbl (VEC_SIZE * 3)(%rdi, %rcx), %eax
- movzbl (VEC_SIZE * 3)(%rsi, %rcx), %edx
- sub %edx, %eax
-# endif
- VZEROUPPER_RETURN
-
.p2align 4
-L(first_vec_x1):
- tzcntl %eax, %ecx
-# ifdef USE_AS_WMEMCMP
- xorl %eax, %eax
- movl VEC_SIZE(%rdi, %rcx), %edx
- cmpl VEC_SIZE(%rsi, %rcx), %edx
- jmp L(wmemcmp_return)
+L(one_or_less):
+ jb L(zero)
+ movl (%rdi), %ecx
+ xorl %edx, %edx
+ cmpl (%rsi), %ecx
+ je L(zero)
+ setg %dl
+ leal -1(%rdx, %rdx), %eax
+ /* No ymm register was touched. */
+ ret
# else
- movzbl VEC_SIZE(%rdi, %rcx), %eax
- movzbl VEC_SIZE(%rsi, %rcx), %edx
- sub %edx, %eax
-# endif
- VZEROUPPER_RETURN
.p2align 4
-L(first_vec_x2):
- tzcntl %eax, %ecx
-# ifdef USE_AS_WMEMCMP
- xorl %eax, %eax
- movl (VEC_SIZE * 2)(%rdi, %rcx), %edx
- cmpl (VEC_SIZE * 2)(%rsi, %rcx), %edx
- jmp L(wmemcmp_return)
-# else
- movzbl (VEC_SIZE * 2)(%rdi, %rcx), %eax
- movzbl (VEC_SIZE * 2)(%rsi, %rcx), %edx
- sub %edx, %eax
+L(between_4_7):
+ /* Load as big endian with overlapping movbe to avoid branches.
+ */
+ movbe (%rdi), %eax
+ movbe (%rsi), %ecx
+ shlq $32, %rax
+ shlq $32, %rcx
+ movbe -4(%rdi, %rdx), %edi
+ movbe -4(%rsi, %rdx), %esi
+ orq %rdi, %rax
+ orq %rsi, %rcx
+ subq %rcx, %rax
+ jz L(zero_4_7)
+ sbbl %eax, %eax
+ orl $1, %eax
+L(zero_4_7):
+ /* No ymm register was touched. */
+ ret
# endif
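
L(between_4_7) extends the big-endian idea to two overlapping movbe loads
per string, packed into one 64-bit value; after subq, CF is set exactly
when s1's value is smaller, and sbb/or folds that into -1 or 1.  A C
sketch (load_be32 is an illustrative stand-in for movbe):

    #include <stdint.h>
    #include <stddef.h>

    static uint32_t load_be32 (const unsigned char *p)   /* movbe */
    {
      return ((uint32_t) p[0] << 24) | ((uint32_t) p[1] << 16)
             | ((uint32_t) p[2] << 8) | p[3];
    }

    static int cmp_4_7 (const unsigned char *s1, const unsigned char *s2,
                        size_t n)   /* 4 <= n <= 7 */
    {
      uint64_t a = ((uint64_t) load_be32 (s1) << 32) | load_be32 (s1 + n - 4);
      uint64_t b = ((uint64_t) load_be32 (s2) << 32) | load_be32 (s2 + n - 4);
      if (a == b)
        return 0;                /* jz L(zero_4_7) */
      return a < b ? -1 : 1;     /* sbbl %eax, %eax; orl $1, %eax */
    }
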
- VZEROUPPER_RETURN
+
END (MEMCMP)
#endif