[v2] x86_64: Implement evex512 version of strlen, strnlen, wcslen and wcsnlen
Checks

Context                Check    Description
dj/TryBot-apply_patch  success  Patch applied to master at the time it was sent
dj/TryBot-32bit        success  Build for i686
Commit Message
This patch implements the following evex512 versions of string
functions.  Performance gain is up to 50% compared to evex, depending
on length and alignment.
- String length function using 512 bit vectors.
- String N length using 512 bit vectors.
- Wide string length using 512 bit vectors.
- Wide string N length using 512 bit vectors.
---
sysdeps/x86_64/multiarch/Makefile | 4 +
sysdeps/x86_64/multiarch/ifunc-impl-list.c | 20 ++
sysdeps/x86_64/multiarch/strlen-evex512.S | 291 +++++++++++++++++++++
sysdeps/x86_64/multiarch/strnlen-evex512.S | 4 +
sysdeps/x86_64/multiarch/wcslen-evex512.S | 4 +
sysdeps/x86_64/multiarch/wcsnlen-evex512.S | 5 +
6 files changed, 328 insertions(+)
create mode 100644 sysdeps/x86_64/multiarch/strlen-evex512.S
create mode 100644 sysdeps/x86_64/multiarch/strnlen-evex512.S
create mode 100644 sysdeps/x86_64/multiarch/wcslen-evex512.S
create mode 100644 sysdeps/x86_64/multiarch/wcsnlen-evex512.S
Comments
On Wed, May 18, 2022 at 10:48 PM Sunil K Pandey via Libc-alpha
<libc-alpha@sourceware.org> wrote:
>
> This patch implements the following evex512 versions of string
> functions.  Performance gain is up to 50% compared to evex, depending
> on length and alignment.
>
> - String length function using 512 bit vectors.
> - String N length using 512 bit vectors.
> - Wide string length using 512 bit vectors.
> - Wide string N length using 512 bit vectors.
> ---
> sysdeps/x86_64/multiarch/Makefile | 4 +
> sysdeps/x86_64/multiarch/ifunc-impl-list.c | 20 ++
> sysdeps/x86_64/multiarch/strlen-evex512.S | 291 +++++++++++++++++++++
> sysdeps/x86_64/multiarch/strnlen-evex512.S | 4 +
> sysdeps/x86_64/multiarch/wcslen-evex512.S | 4 +
> sysdeps/x86_64/multiarch/wcsnlen-evex512.S | 5 +
> 6 files changed, 328 insertions(+)
> create mode 100644 sysdeps/x86_64/multiarch/strlen-evex512.S
> create mode 100644 sysdeps/x86_64/multiarch/strnlen-evex512.S
> create mode 100644 sysdeps/x86_64/multiarch/wcslen-evex512.S
> create mode 100644 sysdeps/x86_64/multiarch/wcsnlen-evex512.S
>
> diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
> index f3ab5e0928..d0869c3ac3 100644
> --- a/sysdeps/x86_64/multiarch/Makefile
> +++ b/sysdeps/x86_64/multiarch/Makefile
> @@ -81,6 +81,7 @@ sysdep_routines += \
> strlen-avx2 \
> strlen-avx2-rtm \
> strlen-evex \
> + strlen-evex512 \
> strlen-sse2 \
> strncase_l-avx2 \
> strncase_l-avx2-rtm \
> @@ -105,6 +106,7 @@ sysdep_routines += \
> strnlen-avx2 \
> strnlen-avx2-rtm \
> strnlen-evex \
> + strnlen-evex512 \
> strnlen-sse2 \
> strpbrk-c \
> strpbrk-sse2 \
> @@ -138,6 +140,7 @@ sysdep_routines += \
> wcslen-avx2 \
> wcslen-avx2-rtm \
> wcslen-evex \
> + wcslen-evex512 \
> wcslen-sse2 \
> wcslen-sse4_1 \
> wcsncmp-avx2 \
> @@ -148,6 +151,7 @@ sysdep_routines += \
> wcsnlen-avx2-rtm \
> wcsnlen-c \
> wcsnlen-evex \
> + wcsnlen-evex512 \
> wcsnlen-sse4_1 \
> wcsrchr-avx2 \
> wcsrchr-avx2-rtm \
> diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
> index 7218095430..c5cd9466fe 100644
> --- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
> +++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
> @@ -328,6 +328,11 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
> && CPU_FEATURE_USABLE (AVX512BW)
> && CPU_FEATURE_USABLE (BMI2)),
> __strlen_evex)
> + IFUNC_IMPL_ADD (array, i, strlen,
> + (CPU_FEATURE_USABLE (AVX512VL)
> + && CPU_FEATURE_USABLE (AVX512BW)
> + && CPU_FEATURE_USABLE (BMI2)),
> + __strlen_evex512)
> IFUNC_IMPL_ADD (array, i, strlen, 1, __strlen_sse2))
>
> /* Support sysdeps/x86_64/multiarch/strnlen.c. */
> @@ -346,6 +351,11 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
> && CPU_FEATURE_USABLE (AVX512BW)
> && CPU_FEATURE_USABLE (BMI2)),
> __strnlen_evex)
> + IFUNC_IMPL_ADD (array, i, strnlen,
> + (CPU_FEATURE_USABLE (AVX512VL)
> + && CPU_FEATURE_USABLE (AVX512BW)
> + && CPU_FEATURE_USABLE (BMI2)),
> + __strnlen_evex512)
> IFUNC_IMPL_ADD (array, i, strnlen, 1, __strnlen_sse2))
>
> /* Support sysdeps/x86_64/multiarch/stpncpy.c. */
> @@ -699,6 +709,11 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
> && CPU_FEATURE_USABLE (AVX512BW)
> && CPU_FEATURE_USABLE (BMI2)),
> __wcslen_evex)
> + IFUNC_IMPL_ADD (array, i, wcslen,
> + (CPU_FEATURE_USABLE (AVX512VL)
> + && CPU_FEATURE_USABLE (AVX512BW)
> + && CPU_FEATURE_USABLE (BMI2)),
> + __wcslen_evex512)
> IFUNC_IMPL_ADD (array, i, wcslen,
> CPU_FEATURE_USABLE (SSE4_1),
> __wcslen_sse4_1)
> @@ -720,6 +735,11 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
> && CPU_FEATURE_USABLE (AVX512BW)
> && CPU_FEATURE_USABLE (BMI2)),
> __wcsnlen_evex)
> + IFUNC_IMPL_ADD (array, i, wcsnlen,
> + (CPU_FEATURE_USABLE (AVX512VL)
> + && CPU_FEATURE_USABLE (AVX512BW)
> + && CPU_FEATURE_USABLE (BMI2)),
> + __wcsnlen_evex512)
> IFUNC_IMPL_ADD (array, i, wcsnlen,
> CPU_FEATURE_USABLE (SSE4_1),
> __wcsnlen_sse4_1)
> diff --git a/sysdeps/x86_64/multiarch/strlen-evex512.S b/sysdeps/x86_64/multiarch/strlen-evex512.S
> new file mode 100644
> index 0000000000..0a2d7bbb1a
> --- /dev/null
> +++ b/sysdeps/x86_64/multiarch/strlen-evex512.S
> @@ -0,0 +1,291 @@
> +/* Copyright (C) 2022 Free Software Foundation, Inc.
> + This file is part of the GNU C Library.
> +
> + The GNU C Library is free software; you can redistribute it and/or
> + modify it under the terms of the GNU Lesser General Public
> + License as published by the Free Software Foundation; either
> + version 2.1 of the License, or (at your option) any later version.
> +
> + The GNU C Library is distributed in the hope that it will be useful,
> + but WITHOUT ANY WARRANTY; without even the implied warranty of
> + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> + Lesser General Public License for more details.
> +
> + You should have received a copy of the GNU Lesser General Public
> + License along with the GNU C Library; if not, see
> + <https://www.gnu.org/licenses/>. */
> +
> +#if IS_IN (libc)
> +
> +# include <sysdep.h>
> +
> +# ifndef STRLEN
> +# define STRLEN __strlen_evex512
> +# endif
> +
> +# define VMOVA vmovdqa64
> +# ifdef USE_AS_WCSLEN
> +# define VPCMP vpcmpd
> +# define VPMINU vpminud
> +# define CHAR_SIZE 4
> +# else
> +# define VPCMP vpcmpb
> +# define VPMINU vpminub
> +# define CHAR_SIZE 1
> +# endif
> +
> +# define XMM0 xmm16
> +# define ZMM0 zmm16
> +# define ZMM1 zmm17
> +# define ZMM2 zmm18
> +# define ZMM3 zmm19
> +# define ZMM4 zmm20
> +# define VEC_SIZE 64
> +# define PAGE_SIZE 4096
> +# define CHAR_PER_VEC (VEC_SIZE / CHAR_SIZE)
> +
> + .section .text.evex512, "ax", @progbits
> +/* Aligning the entry point to 64 bytes provides better performance
> + for strings up to one vector in length. */
> +ENTRY_P2ALIGN (STRLEN, 6)
> +# ifdef USE_AS_STRNLEN
> + /* Check zero length. */
> + test %RSI_LP, %RSI_LP
> + jz L(zero)
> +# ifdef __ILP32__
> + /* Clear the upper 32 bits. */
> + movl %esi, %esi
> +# endif
> +# endif
> +
> + movl %edi, %ecx
> + vpxorq %XMM0, %XMM0, %XMM0
> + andl $(PAGE_SIZE - 1), %ecx
> + cmpl $(PAGE_SIZE - VEC_SIZE), %ecx
Use eax instead of ecx here to save more code size.
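Something like this (untested sketch): the %eax accumulator forms of
`and' and `cmp' with an imm32 are each one byte shorter than the %ecx
forms, though the page-cross path would then need to take its shift
count from somewhere other than %ecx:

	movl	%edi, %eax
	andl	$(PAGE_SIZE - 1), %eax
	cmpl	$(PAGE_SIZE - VEC_SIZE), %eax
	ja	L(page_cross)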
> + ja L(page_cross)
> +
> + /* Compare [w]char for null, mask bit will be set for match. */
> + VPCMP $0, (%rdi), %ZMM0, %k0
> + kmovq %k0, %rax
> + testq %rax, %rax
> + jz L(align_more)
> +
> + tzcntq %rax, %rax
Replace tzcnt with bsf to save code size.
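E.g. (untested):

	bsfq	%rax, %rax

bsf leaves the destination undefined when the source is zero, but that
cannot happen here because the jz above already took the all-zero-mask
path, and bsf drops the 0xf3 prefix that tzcnt needs.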
> +# ifdef USE_AS_STRNLEN
> + cmpq %rsi, %rax
> + jae L(ret_max)
> +# endif
> + ret
> +
> +# ifdef USE_AS_STRNLEN
> + /* eax instead of rax used to save encoding space. */
> +L(zero):
> + xorl %eax, %eax
> + ret
> +# endif
> +
> + /* At this point vector max length reached. */
> +# ifdef USE_AS_STRNLEN
> +L(ret_max):
> + movq %rsi, %rax
> + ret
> +# endif
> +
> +L(page_cross):
> + andl $(VEC_SIZE - 1), %ecx
Not needed, shifts automatically only use the bits in range.
> +# ifdef USE_AS_WCSLEN
> + sarl $2, %ecx
> +# endif
> + /* ecx contains the number of [w]chars to be skipped as a result
> + of address alignment. */
> + movq %rdi, %rax
> + andq $-VEC_SIZE, %rax
You can save further code size doing
`xorq %rdi, %rcx; VPCMP $0, (PAGE_SIZE - VEC_SIZE)(%rcx)...`
then use `rdi` for the shift.
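A sketch of that idea for the byte version (untested; the wcslen case
would still need the count scaled down):

	/* %rcx = page offset of %rdi from above.  */
	xorq	%rdi, %rcx	/* Clear low 12 bits: %rcx = page base.  */
	VPCMP	$0, (PAGE_SIZE - VEC_SIZE)(%rcx), %ZMM0, %k0
	kmovq	%k0, %rax
	/* BMI2 shrx takes its count from any register and masks it to
	   6 bits, so %rdi works directly.  shrx does not set flags,
	   hence the explicit test.  */
	shrxq	%rdi, %rax, %rax
	testq	%rax, %rax
	jz	L(align_more)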
> + VPCMP $0, (%rax), %ZMM0, %k0
> + kmovq %k0, %rax
> + /* Ignore characters included due to alignment adjustment. */
> + shrq %cl, %rax
> + jz L(align_more)
> +
> + tzcntq %rax, %rax
> +# ifdef USE_AS_STRNLEN
> + cmpq %rsi, %rax
> + jae L(ret_max)
> +# endif
> + ret
> +
> +L(align_more):
> + leaq VEC_SIZE(%rdi), %rax
> + /* Align rax to VEC_SIZE. */
> + andq $-VEC_SIZE, %rax
> +# ifdef USE_AS_STRNLEN
> + movq %rax, %rdx
> + subq %rdi, %rdx
> +# ifdef USE_AS_WCSLEN
> + shrq $2, %rdx
> +# endif
> + /* At this point rdx contains [w]chars already compared. */
> + cmpq %rsi, %rdx
You `subq` in the next instruction, so just do the comparison with the `subq`.
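I.e. (untested), since subq sets the flags exactly as cmpq would:

	subq	%rsi, %rdx
	jae	L(ret_max)
	negq	%rdx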
> + jae L(ret_max)
> + subq %rsi, %rdx
> + negq %rdx
> + /* At this point rdx contains the number of [w]chars left to go.
> + From now on rdx will keep decrementing with each compare. */
> +# endif
> +
> + /* Loop unroll 4 times for 4 vector loop. */
> + VPCMP $0, (%rax), %ZMM0, %k0
> + kmovq %k0, %rcx
> + testq %rcx, %rcx
> + jnz L(ret_vec_x1)
> +
> +# ifdef USE_AS_STRNLEN
> + subq $CHAR_PER_VEC, %rdx
> + jbe L(ret_max)
> +# endif
> +
> + VPCMP $0, VEC_SIZE(%rax), %ZMM0, %k0
> + kmovq %k0, %rcx
> + testq %rcx, %rcx
> + jnz L(ret_vec_x2)
> +
> +# ifdef USE_AS_STRNLEN
> + subq $CHAR_PER_VEC, %rdx
> + jbe L(ret_max)
> +# endif
> +
> + VPCMP $0, (2 * VEC_SIZE)(%rax), %ZMM0, %k0
> + kmovq %k0, %rcx
> + testq %rcx, %rcx
> + jnz L(ret_vec_x3)
> +
> +# ifdef USE_AS_STRNLEN
> + subq $CHAR_PER_VEC, %rdx
> + jbe L(ret_max)
> +# endif
> +
> + VPCMP $0, (3 * VEC_SIZE)(%rax), %ZMM0, %k0
> + kmovq %k0, %rcx
> + testq %rcx, %rcx
> + jnz L(ret_vec_x4)
> +
> + addq $(4 * VEC_SIZE), %rax
> +
> +# ifdef USE_AS_STRNLEN
> + /* Instead of decreasing, rdx is increased to prepare for the
> + first loop iteration. Incremented 3 times because one increment
> + is cancelled by the previous decrement. */
> + addq $(3 * CHAR_PER_VEC), %rdx
> +# endif
> +
> + /* If the address is already 4 * VEC_SIZE byte aligned, skip to
> + the loop. */
> + testq $(3 * VEC_SIZE), %rax
Can only imagine this is possibly worth it for STRNLEN.
> + jz L(loop)
> +
> + movq %rax, %rcx
> +
> + /* Align address to 4 * VEC_SIZE for loop. */
> + andq $-(4 * VEC_SIZE), %rax
A less-code-size way to align is
`orq $(VEC_SIZE * 4 - 1), %rax; incq %rax`
> +
> +# ifdef USE_AS_STRNLEN
> + subq %rax, %rcx
> +# ifdef USE_AS_WCSLEN
> + sarq $2, %rcx
> +# endif
> + /* rcx contains the number of [w]chars that will be recompared
> + due to the alignment fix-up. rdx must be incremented by rcx to
> + offset the alignment adjustment. */
> + addq %rcx, %rdx
> +# endif
> +
> +L(loop):
> +# ifdef USE_AS_STRNLEN
> + subq $(CHAR_PER_VEC * 4), %rdx
> + jbe L(ret_max)
> +# endif
> + /* The VPMINU and VPCMP combination provides better performance
> + than alternative combinations. */
> + VMOVA (%rax), %ZMM1
> + VPMINU (VEC_SIZE)(%rax), %ZMM1, %ZMM2
> + VMOVA (2 * VEC_SIZE)(%rax), %ZMM3
> + VPMINU (3 * VEC_SIZE)(%rax), %ZMM3, %ZMM4
> +
> + VPCMP $0, %ZMM2, %ZMM0, %k0
> + VPCMP $0, %ZMM4, %ZMM0, %k1
> +
> + addq $(4 * VEC_SIZE), %rax
> + kortestq %k0, %k1
> + jz L(loop)
> +
> + /* Need to subtract 4 vectors because the address was incremented
> + in the loop before the terminating condition check. Also want to
> + reuse code for the exit condition before and after the loop. */
> + subq $(4 * VEC_SIZE), %rax
Is it possible to just add the 4x offset to the loop displacements?
There is no penalty for imm32 encoding with evex encoding.
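For instance (untested), keeping %rax one 4-vector block behind and
folding the offset into the load displacements; with VEC_SIZE = 64
these all still fit the EVEX disp8*N compression:

L(loop):
	VMOVA	(4 * VEC_SIZE)(%rax), %ZMM1
	VPMINU	(5 * VEC_SIZE)(%rax), %ZMM1, %ZMM2
	VMOVA	(6 * VEC_SIZE)(%rax), %ZMM3
	VPMINU	(7 * VEC_SIZE)(%rax), %ZMM3, %ZMM4
	VPCMP	$0, %ZMM2, %ZMM0, %k0
	VPCMP	$0, %ZMM4, %ZMM0, %k1
	addq	$(4 * VEC_SIZE), %rax
	kortestq %k0, %k1
	jz	L(loop)

Then %rax already points at the block containing the match, the
post-loop subq goes away, and the shared L(ret_vec_x*) tails work
unchanged, provided %rax enters the loop one block behind.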
> +
> + VPCMP $0, %ZMM1, %ZMM0, %k2
> + kmovq %k2, %rcx
> + testq %rcx, %rcx
> + jnz L(ret_vec_x1)
> +
> + kmovq %k0, %rcx
> + /* At this point, if k0 is non zero, null char must be in the
> + second vector. */
> + testq %rcx, %rcx
> + jnz L(ret_vec_x2)
> +
> + VPCMP $0, %ZMM3, %ZMM0, %k3
> + kmovq %k3, %rcx
> + testq %rcx, %rcx
> + jnz L(ret_vec_x3)
> + /* At this point null [w]char must be in the fourth vector so no
> + need to check. */
> + kmovq %k1, %rcx
> +
> + /* Termination for the fourth, third and second vectors is pretty
> + much the same, implemented this way to avoid branching and to
> + reuse code from the pre-loop exit condition. */
> +L(ret_vec_x4):
> + addq $(3 * VEC_SIZE), %rax
> + tzcntq %rcx, %rcx
> + subq %rdi, %rax
> +# ifdef USE_AS_WCSLEN
> + sarq $2, %rax
> +# endif
> + addq %rcx, %rax
> +# ifdef USE_AS_STRNLEN
> + cmpq %rsi, %rax
> + jae L(ret_max)
> +# endif
> + ret
> +
> +L(ret_vec_x3):
> + addq $(2 * VEC_SIZE), %rax
> + tzcntq %rcx, %rcx
> + subq %rdi, %rax
> +# ifdef USE_AS_WCSLEN
> + sarq $2, %rax
> +# endif
> + addq %rcx, %rax
> +# ifdef USE_AS_STRNLEN
> + cmpq %rsi, %rax
> + jae L(ret_max)
> +# endif
> + ret
> +
> +L(ret_vec_x2):
> + addq $VEC_SIZE, %rax
> +L(ret_vec_x1):
> + tzcntq %rcx, %rcx
> + subq %rdi, %rax
> +# ifdef USE_AS_WCSLEN
> + sarq $2, %rax
> +# endif
> + addq %rcx, %rax
> +# ifdef USE_AS_STRNLEN
> + cmpq %rsi, %rax
> + jae L(ret_max)
> +# endif
> + ret
> +
> +END (STRLEN)
> +#endif
> diff --git a/sysdeps/x86_64/multiarch/strnlen-evex512.S b/sysdeps/x86_64/multiarch/strnlen-evex512.S
> new file mode 100644
> index 0000000000..0b7f220214
> --- /dev/null
> +++ b/sysdeps/x86_64/multiarch/strnlen-evex512.S
> @@ -0,0 +1,4 @@
> +#define STRLEN __strnlen_evex512
> +#define USE_AS_STRNLEN 1
> +
> +#include "strlen-evex512.S"
> diff --git a/sysdeps/x86_64/multiarch/wcslen-evex512.S b/sysdeps/x86_64/multiarch/wcslen-evex512.S
> new file mode 100644
> index 0000000000..f59c372b78
> --- /dev/null
> +++ b/sysdeps/x86_64/multiarch/wcslen-evex512.S
> @@ -0,0 +1,4 @@
> +#define STRLEN __wcslen_evex512
> +#define USE_AS_WCSLEN 1
> +
> +#include "strlen-evex512.S"
> diff --git a/sysdeps/x86_64/multiarch/wcsnlen-evex512.S b/sysdeps/x86_64/multiarch/wcsnlen-evex512.S
> new file mode 100644
> index 0000000000..73dcf2f210
> --- /dev/null
> +++ b/sysdeps/x86_64/multiarch/wcsnlen-evex512.S
> @@ -0,0 +1,5 @@
> +#define STRLEN __wcsnlen_evex512
> +#define USE_AS_WCSLEN 1
> +#define USE_AS_STRNLEN 1
> +
> +#include "strlen-evex512.S"
> --
> 2.35.3
>