[v3] x86_64: Implement evex512 version of strchrnul, strchr and wcschr

Message ID 20221025233504.843025-1-skpgkp2@gmail.com
State New
Series [v3] x86_64: Implement evex512 version of strchrnul, strchr and wcschr

Checks

Context Check Description
dj/TryBot-apply_patch success Patch applied to master at the time it was sent
dj/TryBot-32bit success Build for i686

Commit Message

Sunil Pandey Oct. 25, 2022, 11:35 p.m. UTC
  Changes from v2:
- Replace 2x load with mask logic.
Changes from v1:
- Use VEC API.
- Replace extra lea in align_more with add.
- Restructure loop logic.
- Create zero_2 to avoid long jmp.
- Combine first, second and third vector return logic.

This patch implements the following evex512 versions of string
functions.  The evex512 version takes up to 30% fewer cycles than
evex, depending on length and alignment.

- strchrnul function using 512-bit vectors.
- strchr function using 512-bit vectors.
- wcschr function using 512-bit vectors.
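
For reference, the scalar semantics these routines vectorize (a
minimal C sketch, not the glibc sources):

```c
#include <stddef.h>
#include <wchar.h>

/* strchrnul: return a pointer to the first occurrence of C, or to
   the terminating null byte if C does not occur.  */
char *
my_strchrnul (const char *s, int c)
{
  while (*s != (char) c && *s != '\0')
    s++;
  return (char *) s;
}

/* strchr: as above, but return NULL when C is not found.  */
char *
my_strchr (const char *s, int c)
{
  char *p = my_strchrnul (s, c);
  return *p == (char) c ? p : NULL;
}

/* wcschr: the wide-character variant; on x86_64 wchar_t is 4 bytes,
   hence the separate CHAR_SIZE/vpcmpd definitions in the patch.  */
wchar_t *
my_wcschr (const wchar_t *s, wchar_t c)
{
  while (*s != c && *s != L'\0')
    s++;
  return *s == c ? (wchar_t *) s : NULL;
}
```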

Code size data:

strchrnul-evex.o	599 bytes
strchrnul-evex512.o	569 bytes (-5%)

strchr-evex.o		639 bytes
strchr-evex512.o	595 bytes (-7%)

wcschr-evex.o		644 bytes
wcschr-evex512.o	607 bytes (-6%)

Placeholder function, not used by any processor at the moment.
---
 sysdeps/x86_64/multiarch/Makefile            |   3 +
 sysdeps/x86_64/multiarch/ifunc-impl-list.c   |  12 +
 sysdeps/x86_64/multiarch/strchr-evex-base.S  | 282 +++++++++++++++++++
 sysdeps/x86_64/multiarch/strchr-evex512.S    |   8 +
 sysdeps/x86_64/multiarch/strchrnul-evex512.S |   8 +
 sysdeps/x86_64/multiarch/wcschr-evex512.S    |   9 +
 6 files changed, 322 insertions(+)
 create mode 100644 sysdeps/x86_64/multiarch/strchr-evex-base.S
 create mode 100644 sysdeps/x86_64/multiarch/strchr-evex512.S
 create mode 100644 sysdeps/x86_64/multiarch/strchrnul-evex512.S
 create mode 100644 sysdeps/x86_64/multiarch/wcschr-evex512.S
  

Comments

Noah Goldstein Oct. 26, 2022, 1:35 a.m. UTC | #1
On Tue, Oct 25, 2022 at 6:35 PM Sunil K Pandey via Libc-alpha
<libc-alpha@sourceware.org> wrote:
>
> Changes from v2:
> - Replace 2x load with mask logic.
> Changes from v1:
> - Use VEC API.
> - Replace extra lea in align_more with add.
> - Restructure loop logic.
> - Create zero_2 to avoid long jmp.
> - Combine first, second and third vector return logic.
>
> This patch implements following evex512 version of string functions.
> evex512 version takes up to 30% less cycle as compared to evex,
> depending on length and alignment.
>
> - strchrnul function using 512 bit vectors.
> - strchr function using 512 bit vectors.
> - wcschr function using 512 bit vectors.
>
> Code size data:
>
> strchrnul-evex.o        599 byte
> strchrnul-evex512.o     569 byte (-5%)
>
> strchr-evex.o           639 byte
> strchr-evex512.o        595 byte (-7%)
>
> wcschr-evex.o           644 byte
> wcschr-evex512.o        607 byte (-6%)
>
> Placeholder function, not used by any processor at the moment.
> ---
>  sysdeps/x86_64/multiarch/Makefile            |   3 +
>  sysdeps/x86_64/multiarch/ifunc-impl-list.c   |  12 +
>  sysdeps/x86_64/multiarch/strchr-evex-base.S  | 282 +++++++++++++++++++
>  sysdeps/x86_64/multiarch/strchr-evex512.S    |   8 +
>  sysdeps/x86_64/multiarch/strchrnul-evex512.S |   8 +
>  sysdeps/x86_64/multiarch/wcschr-evex512.S    |   9 +
>  6 files changed, 322 insertions(+)
>  create mode 100644 sysdeps/x86_64/multiarch/strchr-evex-base.S
>  create mode 100644 sysdeps/x86_64/multiarch/strchr-evex512.S
>  create mode 100644 sysdeps/x86_64/multiarch/strchrnul-evex512.S
>  create mode 100644 sysdeps/x86_64/multiarch/wcschr-evex512.S
>
> diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
> index e974b1ad97..597ac9d5e9 100644
> --- a/sysdeps/x86_64/multiarch/Makefile
> +++ b/sysdeps/x86_64/multiarch/Makefile
> @@ -62,11 +62,13 @@ sysdep_routines += \
>    strchr-avx2 \
>    strchr-avx2-rtm \
>    strchr-evex \
> +  strchr-evex512 \
>    strchr-sse2 \
>    strchr-sse2-no-bsf \
>    strchrnul-avx2 \
>    strchrnul-avx2-rtm \
>    strchrnul-evex \
> +  strchrnul-evex512 \
>    strchrnul-sse2 \
>    strcmp-avx2 \
>    strcmp-avx2-rtm \
> @@ -131,6 +133,7 @@ sysdep_routines += \
>    wcschr-avx2 \
>    wcschr-avx2-rtm \
>    wcschr-evex \
> +  wcschr-evex512 \
>    wcschr-sse2 \
>    wcscmp-avx2 \
>    wcscmp-avx2-rtm \
> diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
> index 529c0b0ef0..c3d75a09f4 100644
> --- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
> +++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
> @@ -544,6 +544,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
>                                       && CPU_FEATURE_USABLE (AVX512BW)
>                                       && CPU_FEATURE_USABLE (BMI2)),
>                                      __strchr_evex)
> +             X86_IFUNC_IMPL_ADD_V4 (array, i, strchr,
> +                                    (CPU_FEATURE_USABLE (AVX512VL)
> +                                     && CPU_FEATURE_USABLE (AVX512BW)),
> +                                    __strchr_evex512)
>               X86_IFUNC_IMPL_ADD_V3 (array, i, strchr,
>                                      (CPU_FEATURE_USABLE (AVX2)
>                                       && CPU_FEATURE_USABLE (BMI2)),
> @@ -569,6 +573,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
>                                       && CPU_FEATURE_USABLE (AVX512BW)
>                                       && CPU_FEATURE_USABLE (BMI2)),
>                                      __strchrnul_evex)
> +             X86_IFUNC_IMPL_ADD_V4 (array, i, strchrnul,
> +                                    (CPU_FEATURE_USABLE (AVX512VL)
> +                                     && CPU_FEATURE_USABLE (AVX512BW)),
> +                                    __strchrnul_evex512)
>               X86_IFUNC_IMPL_ADD_V3 (array, i, strchrnul,
>                                      (CPU_FEATURE_USABLE (AVX2)
>                                       && CPU_FEATURE_USABLE (BMI2)),
> @@ -793,6 +801,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
>                                       && CPU_FEATURE_USABLE (AVX512BW)
>                                       && CPU_FEATURE_USABLE (BMI2)),
>                                      __wcschr_evex)
> +             X86_IFUNC_IMPL_ADD_V4 (array, i, wcschr,
> +                                    (CPU_FEATURE_USABLE (AVX512VL)
> +                                     && CPU_FEATURE_USABLE (AVX512BW)),
> +                                    __wcschr_evex512)
>               X86_IFUNC_IMPL_ADD_V3 (array, i, wcschr,
>                                      (CPU_FEATURE_USABLE (AVX2)
>                                       && CPU_FEATURE_USABLE (BMI2)),
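
Side note for readers following the ifunc plumbing: the
X86_IFUNC_IMPL_ADD_V4 entries above register the new variants with the
implementation list used by tests and benchmarks; runtime selection
happens in the per-function resolvers. Very roughly, the selection
idea looks like this (a simplified C sketch with a hypothetical
cpu_has() standing in for CPU_FEATURE_USABLE, not the actual glibc
resolver code):
```c
typedef char *(*strchr_fn) (const char *, int);

extern char *__strchr_evex512 (const char *, int);
extern char *__strchr_evex (const char *, int);
extern char *__strchr_sse2 (const char *, int);

/* Hypothetical stand-in for glibc's CPU_FEATURE_USABLE machinery.  */
extern int cpu_has (const char *feature);

static strchr_fn
select_strchr (void)
{
  /* The evex512 variant needs AVX512VL and AVX512BW; being a
     placeholder, no processor is steered to it yet.  */
  if (cpu_has ("AVX512VL") && cpu_has ("AVX512BW")
      && cpu_has ("Prefer_512_bit_vectors"))	/* hypothetical knob */
    return __strchr_evex512;
  if (cpu_has ("AVX512VL") && cpu_has ("AVX512BW")
      && cpu_has ("BMI2"))
    return __strchr_evex;
  return __strchr_sse2;
}
```
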
> diff --git a/sysdeps/x86_64/multiarch/strchr-evex-base.S b/sysdeps/x86_64/multiarch/strchr-evex-base.S
> new file mode 100644
> index 0000000000..21a6bc5907
> --- /dev/null
> +++ b/sysdeps/x86_64/multiarch/strchr-evex-base.S
> @@ -0,0 +1,282 @@
> +/* Placeholder function, not used by any processor at the moment.
> +   Copyright (C) 2022 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <https://www.gnu.org/licenses/>.  */
> +
> +/* UNUSED. Exists purely as reference implementation.  */
> +
> +#include <isa-level.h>
> +
> +#if ISA_SHOULD_BUILD (4)
> +
> +# include <sysdep.h>
> +
> +# ifdef USE_AS_WCSCHR
> +#  define CHAR_REG     esi
> +#  define CHAR_SIZE    4
> +#  define VPBROADCAST   vpbroadcastd
> +#  define VPCMP                vpcmpd
> +#  define VPCMPNE      vpcmpneqd
> +#  define VPMINU       vpminud
> +#  define VPTEST       vptestmd
> +#  define VPTESTN      vptestnmd
> +# else
> +#  define CHAR_REG     sil
> +#  define CHAR_SIZE    1
> +#  define VPBROADCAST   vpbroadcastb
> +#  define VPCMP                vpcmpb
> +#  define VPCMPNE      vpcmpneqb
> +#  define VPMINU       vpminub
> +#  define VPTEST       vptestmb
> +#  define VPTESTN      vptestnmb
> +# endif
> +
> +# define PAGE_SIZE     4096
> +# define CHAR_PER_VEC  (VEC_SIZE / CHAR_SIZE)
> +# define VEC_MATCH_MASK ((1 << CHAR_PER_VEC) - 1)
> +
> +       .section SECTION(.text), "ax", @progbits
> +/* Align the entry point to 64 bytes; this gives better performance
> +   for strings of up to one vector length.  */
> +ENTRY_P2ALIGN (STRCHR, 6)
> +
> +       /* Broadcast CHAR to VMM(0).  */
> +       VPBROADCAST %esi, %VMM(0)
> +       movl    %edi, %eax
> +       sall    $20,%eax
> +       cmpl    $((PAGE_SIZE - VEC_SIZE) << 20), %eax
> +       ja      L(page_cross)
> +
> +       VMOVU   (%rdi), %VMM(1)
> +       VPCMPNE %VMM(1), %VMM(0), %k1
> +       VPTEST  %VMM(1), %VMM(1), %k0{%k1}
> +       KMOV    %k0, %VRAX
> +       /* The mask has a bit set while a lane is neither CHAR nor
> +          null; a cleared bit marks a match or the end of string.  */
> +
> +# ifdef USE_AS_WCSCHR
> +       sub     $VEC_MATCH_MASK, %VRAX
> +# else
> +       inc     %VRAX
> +# endif
> +       jz      L(align_more)
> +
> +       bsf     %VRAX, %VRAX
> +
> +# ifdef USE_AS_WCSCHR
> +       leaq    (%rdi, %rax, CHAR_SIZE), %rax
> +# else
> +       add     %rdi, %rax
> +# endif
> +# ifndef USE_AS_STRCHRNUL
> +       cmp     (%rax), %CHAR_REG
> +       jne     L(zero)
> +       ret
> +L(zero):
> +       xorl    %eax, %eax
> +# endif
> +       ret
> +
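
Side note on two idioms in the entry sequence above. `sall $20` keeps
only the low 12 bits of the address (the page offset) at the top of
%eax, so the unsigned compare asks whether an unaligned VEC_SIZE load
would cross into the next page. And because the k-mask produced by
VPCMPNE/VPTEST has a bit set while a lane is still neither CHAR nor
null, the `inc` (or `sub $VEC_MATCH_MASK` for the 16-bit wcschr mask)
doubles as the all-ones test and as a cheap way to expose the lowest
zero bit to the following `bsf`. A C model of both, assuming the byte
case and a GCC/Clang builtin in place of `bsf`:
```c
#include <stdint.h>

#define PAGE_SIZE 4096
#define VEC_SIZE  64

/* sall $20; cmpl; ja: would an unaligned 64-byte load from S run off
   the current page?  */
static int
load_crosses_page (const char *s)
{
  return ((uintptr_t) s & (PAGE_SIZE - 1)) > PAGE_SIZE - VEC_SIZE;
}

/* inc %VRAX; jz; bsf: SCAN_MASK has bit i set while s[i] is neither
   CHAR nor null.  Adding 1 clears the trailing run of ones and sets
   the lowest zero bit, so ctz (mask + 1) is the index of the first
   match-or-null lane.  */
static long
first_hit_index (uint64_t scan_mask)
{
  if (scan_mask + 1 == 0)	/* all ones: nothing in this vector */
    return -1;
  return __builtin_ctzll (scan_mask + 1);
}
```
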
> +L(ret_vec_x3):
> +       subq    $-VEC_SIZE, %rdi
> +L(ret_vec_x2):
> +       subq    $-VEC_SIZE, %rdi
> +L(ret_vec_x1):
> +       bsf     %VRAX, %VRAX
> +# ifdef USE_AS_WCSCHR
> +       leaq    (%rdi, %rax, CHAR_SIZE), %rax
> +# else
> +       add     %rdi, %rax
> +# endif
> +
> +# ifndef USE_AS_STRCHRNUL
> +       cmp     (%rax), %CHAR_REG
> +       jne     L(zero)
> +# endif
> +       ret
> +
> +L(page_cross):
> +       mov     %rdi, %rax
> +       movl    %edi, %ecx
> +# ifdef USE_AS_WCSCHR
> +       /* Calculate number of compare result bits to be skipped for
> +          wide string alignment adjustment.  */
> +       andl    $(VEC_SIZE - 1), %ecx
> +       sarl    $2, %ecx
> +# endif
> +       /* ecx contains the number of [w]chars to be skipped as a
> +          result of address alignment.  */
> +       andq    $-VEC_SIZE, %rax
> +
> +       VMOVA   (%rax), %VMM(1)
> +       VPCMPNE %VMM(1), %VMM(0), %k1
> +       VPTEST  %VMM(1), %VMM(1), %k0{%k1}
> +       KMOV    %k0, %VRAX
> +# ifdef USE_AS_WCSCHR
> +       sub     $VEC_MATCH_MASK, %VRAX
> +# else
> +       inc     %VRAX
> +# endif
> +       /* Drop the mask bits for the characters skipped by the
> +          alignment adjustment.  */
> +       shr     %cl, %VRAX
> +       jz      L(align_more)
> +
> +       bsf     %VRAX, %VRAX
> +# ifdef USE_AS_WCSCHR
> +       leaq    (%rdi, %rax, CHAR_SIZE), %rax
> +# else
> +       addq    %rdi, %rax
> +# endif
> +
> +# ifndef USE_AS_STRCHRNUL
> +       cmp     (%rax), %CHAR_REG
> +       jne     L(zero)
> +# endif
> +       ret
> +
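
For the page-cross path itself: a VEC_SIZE-aligned load can never
cross a page, so the code rounds the pointer down, loads once, and
discards the mask bits for the characters that precede the string.
Conceptually (a sketch that uses a full complement where the assembly
reuses the cheaper inc/sub idiom before the shift):
```c
#include <stdint.h>

#define VEC_SIZE 64

/* SCAN_MASK models k0 for the aligned vector containing S: bit i set
   while aligned[i] is neither CHAR nor null.  */
static long
page_cross_index (const char *s, uint64_t scan_mask)
{
  unsigned skip = (uintptr_t) s & (VEC_SIZE - 1); /* chars before S */
  uint64_t hits = ~scan_mask >> skip;	/* match-or-null at/after S */
  if (hits == 0)
    return -1;				/* jz L(align_more) */
  return __builtin_ctzll (hits);	/* bsf; index relative to S */
}
```
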
> +L(align_more):
> +       /* Align rdi to VEC_SIZE.  */
> +       andq    $-VEC_SIZE, %rdi
> +
> +       /* Unrolled 4x: check four individual vectors before
> +          entering the aligned 4-vector loop.  */
> +       VMOVA   VEC_SIZE(%rdi), %VMM(1)
> +       VPCMPNE %VMM(1), %VMM(0), %k1
> +       VPTEST  %VMM(1), %VMM(1), %k0{%k1}
> +
> +       /* Increment rdi by vector size for further comparison and
> +          return.  */
> +       subq    $-VEC_SIZE, %rdi
> +       KMOV    %k0, %VRAX
> +
> +# ifdef USE_AS_WCSCHR
> +       sub     $VEC_MATCH_MASK, %VRAX
> +# else
> +       inc     %VRAX
> +# endif
> +       jnz     L(ret_vec_x1)
> +
> +       VMOVA   VEC_SIZE(%rdi), %VMM(1)
> +       VPCMPNE %VMM(1), %VMM(0), %k1
> +       VPTEST  %VMM(1), %VMM(1), %k0{%k1}
> +       KMOV    %k0, %VRAX
> +# ifdef USE_AS_WCSCHR
> +       sub     $VEC_MATCH_MASK, %VRAX
> +# else
> +       inc     %VRAX
> +# endif
> +       jnz     L(ret_vec_x2)
> +
> +       VMOVA   (VEC_SIZE * 2)(%rdi), %VMM(1)
> +       VPCMPNE %VMM(1), %VMM(0), %k1
> +       VPTEST  %VMM(1), %VMM(1), %k0{%k1}
> +       KMOV    %k0, %VRAX
> +# ifdef USE_AS_WCSCHR
> +       sub     $VEC_MATCH_MASK, %VRAX
> +# else
> +       inc     %VRAX
> +# endif
> +       jnz     L(ret_vec_x3)
> +
> +       VMOVA   (VEC_SIZE * 3)(%rdi), %VMM(1)
> +       VPCMPNE %VMM(1), %VMM(0), %k1
> +       VPTEST  %VMM(1), %VMM(1), %k0{%k1}
> +       KMOV    %k0, %VRDX
> +# ifdef USE_AS_WCSCHR
> +       sub     $VEC_MATCH_MASK, %VRDX
> +# else
> +       inc     %VRDX
> +# endif
> +       jnz     L(ret_vec_x4)
> +
> +
> +       /* Align address to VEC_SIZE * 4 for loop.  */
> +       andq    $-(VEC_SIZE * 4), %rdi
> +L(loop):
> +       /* The VPMINU and VPCMP combination provides better
> +          performance than alternative combinations.  */
> +       VMOVA   (VEC_SIZE * 4)(%rdi), %VMM(1)
> +       VMOVA   (VEC_SIZE * 5)(%rdi), %VMM(2)
> +       VMOVA   (VEC_SIZE * 6)(%rdi), %VMM(3)
> +       VMOVA   (VEC_SIZE * 7)(%rdi), %VMM(4)
> +
> +       VPCMPNE %VMM(1), %VMM(0), %k1
> +       VPCMPNE %VMM(2), %VMM(0), %k2
> +
> +       VPMINU  %VMM(2), %VMM(1), %VMM(2)
> +
> +       VPCMPNE %VMM(3), %VMM(0), %k3{%k1}
> +       VPCMPNE %VMM(4), %VMM(0), %k4{%k2}
> +
> +       VPMINU  %VMM(4), %VMM(3), %VMM(4)
> +       VPMINU  %VMM(2), %VMM(4), %VMM(4){%k3}{z}
> +
> +       VPTEST  %VMM(4), %VMM(4), %k5{%k4}
> +
> +       KMOV    %k5, %VRDX
> +       subq    $-(VEC_SIZE * 4), %rdi
> +# ifdef USE_AS_WCSCHR
> +       sub     $VEC_MATCH_MASK, %VRDX
> +# else
> +       inc     %VRDX
> +# endif
> +       jz      L(loop)
> +
> +       VPTEST  %VMM(1), %VMM(1), %k0{%k1}
> +       KMOV    %k0, %VRAX
> +# ifdef USE_AS_WCSCHR
> +       sub     $VEC_MATCH_MASK, %VRAX
> +# else
> +       inc     %VRAX
> +# endif
> +       jnz     L(ret_vec_x1)
> +
> +       VPTEST  %VMM(2), %VMM(2), %k0{%k2}
> +       KMOV    %k0, %VRAX
> +       /* At this point, if k1 is nonzero, the null char must be in
> +          the second vector.  */
> +# ifdef USE_AS_WCSCHR
> +       sub     $VEC_MATCH_MASK, %VRAX
> +# else
> +       inc     %VRAX
> +# endif
> +       jnz     L(ret_vec_x2)
> +
> +       VPTEST  %VMM(3), %VMM(3), %k0{%k3}
> +       KMOV    %k0, %VRAX
> +# ifdef USE_AS_WCSCHR
> +       sub     $VEC_MATCH_MASK, %VRAX
> +# else
> +       inc     %VRAX
> +# endif
> +       jnz     L(ret_vec_x3)
> +       /* At this point the null [w]char must be in the fourth
> +          vector, so there is no need to check.  */
> +
> +L(ret_vec_x4):
> +       bsf     %VRDX, %VRAX
> +       leaq    (VEC_SIZE * 3)(%rdi, %rax, CHAR_SIZE), %rax

Can you make this:
```
bsf %VRDX, %VRDX
leaq (VEC_SIZE * 3)(%rdi, %rdx, CHAR_SIZE), %rax
```
bsf has a false dependency on the destination register (when the
source is zero, current CPUs leave the destination unmodified, so the
hardware must treat it as an input); reusing the source register as
the destination avoids a stall on the stale value in %rax.

> +# ifndef USE_AS_STRCHRNUL
> +       cmp     (%rax), %CHAR_REG
> +       jne     L(zero_2)
> +# endif
> +       ret
> +
> +# ifndef USE_AS_STRCHRNUL
> +L(zero_2):
> +       xor     %eax, %eax
> +       ret
> +# endif
> +END (STRCHR)
> +#endif
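
Side note on the main loop above: the VPMINU/VPCMPNE combination folds
"no CHAR" and "no null" across all four vectors into the single mask
tested by VPTEST, so the loop pays for one KMOV and one branch per
4 * VEC_SIZE bytes. Per lane, the reduction computes (a C sketch of a
single byte lane):
```c
#include <stdint.h>

/* V1..V4 are the characters in the same lane of the four loaded
   vectors; the loop keeps running only while every lane reports "no
   CHAR and no null" in all four.  */
static int
lane_keeps_scanning (uint8_t v1, uint8_t v2, uint8_t v3, uint8_t v4,
		     uint8_t c)
{
  /* k3 = VPCMPNE (v3) masked by k1; k4 = VPCMPNE (v4) masked by k2.  */
  int k3 = (v1 != c) && (v3 != c);
  int k4 = (v2 != c) && (v4 != c);

  /* VPMINU chain: the minimum of all four values is nonzero iff none
     of them is null; {k3}{z} zero-masking folds k3 into the value.  */
  uint8_t min12 = v1 < v2 ? v1 : v2;
  uint8_t min34 = v3 < v4 ? v3 : v4;
  uint8_t masked_min = k3 ? (min12 < min34 ? min12 : min34) : 0;

  /* VPTEST under k4: the k5 bit stays set only when there is no match
     and no null anywhere in the four lanes.  */
  return k4 && masked_min != 0;
}
```
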
> diff --git a/sysdeps/x86_64/multiarch/strchr-evex512.S b/sysdeps/x86_64/multiarch/strchr-evex512.S
> new file mode 100644
> index 0000000000..a4ac022952
> --- /dev/null
> +++ b/sysdeps/x86_64/multiarch/strchr-evex512.S
> @@ -0,0 +1,8 @@
> +# ifndef STRCHR
> +#  define STRCHR       __strchr_evex512
> +# endif
> +
> +#include "x86-evex512-vecs.h"
> +#include "reg-macros.h"
> +
> +#include "strchr-evex-base.S"
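
For context, the stub above only picks the entry-point name; the
vector width comes from the included headers. x86-evex512-vecs.h and
reg-macros.h implement the VEC API used by strchr-evex-base.S, roughly
along these lines (a simplified sketch, not the real headers):
```c
/* Simplified sketch; the real definitions live in
   sysdeps/x86_64/multiarch/x86-evex512-vecs.h and reg-macros.h.  */
#define VEC_SIZE 64		/* 512-bit vectors */
#define VMM(N)	 %zmm##N	/* VMM(0) -> %zmm0, ...  */
#define VMOVU	 vmovdqu64	/* unaligned vector load/store */
#define VMOVA	 vmovdqa64	/* aligned vector load/store */
/* Mask helpers sized to the lane count: 64 byte lanes need a 64-bit
   mask (kmovq, %rax); the 16 wchar lanes fit in 32 bits.  */
#define KMOV	 kmovq
#define VRAX	 %rax
#define VRDX	 %rdx
```
Because every vector and mask access in the shared body goes through
these macros, the same code can be instantiated at other vector widths
by swapping the vecs header.
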
> diff --git a/sysdeps/x86_64/multiarch/strchrnul-evex512.S b/sysdeps/x86_64/multiarch/strchrnul-evex512.S
> new file mode 100644
> index 0000000000..1be0b12f38
> --- /dev/null
> +++ b/sysdeps/x86_64/multiarch/strchrnul-evex512.S
> @@ -0,0 +1,8 @@
> +#ifndef STRCHRNUL
> +# define STRCHRNUL     __strchrnul_evex512
> +#endif
> +
> +#define STRCHR STRCHRNUL
> +#define USE_AS_STRCHRNUL 1
> +
> +#include "strchr-evex512.S"
> diff --git a/sysdeps/x86_64/multiarch/wcschr-evex512.S b/sysdeps/x86_64/multiarch/wcschr-evex512.S
> new file mode 100644
> index 0000000000..3fe4e77a70
> --- /dev/null
> +++ b/sysdeps/x86_64/multiarch/wcschr-evex512.S
> @@ -0,0 +1,9 @@
> +#ifndef WCSCHR
> +# define WCSCHR        __wcschr_evex512
> +#endif
> +
> +#define STRCHR WCSCHR
> +#define USE_AS_WCSCHR 1
> +
> +#define USE_WIDE_CHAR 1
> +#include "strchr-evex512.S"
> --
> 2.36.1
>
  
