@@ -4,6 +4,7 @@ sysdep_routines += \
memchr-avx2 \
memchr-avx2-rtm \
memchr-evex \
+ memchr-evex512 \
memchr-evex-rtm \
memchr-sse2 \
memcmp-avx2-movbe \
@@ -36,6 +37,7 @@ sysdep_routines += \
rawmemchr-avx2 \
rawmemchr-avx2-rtm \
rawmemchr-evex \
+ rawmemchr-evex512 \
rawmemchr-evex-rtm \
rawmemchr-sse2 \
stpcpy-avx2 \
@@ -156,6 +158,7 @@ sysdep_routines += \
wmemchr-avx2 \
wmemchr-avx2-rtm \
wmemchr-evex \
+ wmemchr-evex512 \
wmemchr-evex-rtm \
wmemchr-sse2 \
wmemcmp-avx2-movbe \
@@ -63,6 +63,11 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
&& CPU_FEATURE_USABLE (AVX512BW)
&& CPU_FEATURE_USABLE (BMI2)),
__memchr_evex)
+ X86_IFUNC_IMPL_ADD_V4 (array, i, memchr,
+ (CPU_FEATURE_USABLE (AVX512VL)
+ && CPU_FEATURE_USABLE (AVX512BW)
+ && CPU_FEATURE_USABLE (BMI2)),
+ __memchr_evex512)
X86_IFUNC_IMPL_ADD_V4 (array, i, memchr,
(CPU_FEATURE_USABLE (AVX512VL)
&& CPU_FEATURE_USABLE (AVX512BW)
@@ -337,6 +342,11 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
&& CPU_FEATURE_USABLE (AVX512BW)
&& CPU_FEATURE_USABLE (BMI2)),
__rawmemchr_evex)
+ X86_IFUNC_IMPL_ADD_V4 (array, i, rawmemchr,
+ (CPU_FEATURE_USABLE (AVX512VL)
+ && CPU_FEATURE_USABLE (AVX512BW)
+ && CPU_FEATURE_USABLE (BMI2)),
+ __rawmemchr_evex512)
X86_IFUNC_IMPL_ADD_V4 (array, i, rawmemchr,
(CPU_FEATURE_USABLE (AVX512VL)
&& CPU_FEATURE_USABLE (AVX512BW)
@@ -942,6 +952,11 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
&& CPU_FEATURE_USABLE (AVX512BW)
&& CPU_FEATURE_USABLE (BMI2)),
__wmemchr_evex)
+ X86_IFUNC_IMPL_ADD_V4 (array, i, wmemchr,
+ (CPU_FEATURE_USABLE (AVX512VL)
+ && CPU_FEATURE_USABLE (AVX512BW)
+ && CPU_FEATURE_USABLE (BMI2)),
+ __wmemchr_evex512)
X86_IFUNC_IMPL_ADD_V4 (array, i, wmemchr,
(CPU_FEATURE_USABLE (AVX512VL)
&& CPU_FEATURE_USABLE (AVX512BW)
new file mode 100644
@@ -0,0 +1,328 @@
+/* Placeholder function, not used by any processor at the moment.
+ Copyright (C) 2022 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* UNUSED.  Exists purely as a reference implementation.  */
+
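+/* Overview of the algorithm below:
+   1. Compare the first, possibly unaligned, vector against the
+      broadcast CHAR; if the load would cross a page boundary, take the
+      page-cross path, which loads from an aligned address instead.
+   2. Align the pointer and check the next four vectors one at a time.
+   3. Align to 4 * VEC_SIZE and loop over four vectors per iteration,
+      combining the match masks so a single KORTEST decides whether to
+      continue.  */
+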
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (4)
+
+# include <sysdep.h>
+
+# ifdef USE_AS_WMEMCHR
+# define CHAR_SIZE 4
+# define VPBROADCAST vpbroadcastd
+# define VPCMPEQ vpcmpeqd
+# define VPCMPNE vpcmpneqd
+# define VPMINU vpminud
+# else
+# define CHAR_SIZE 1
+# define VPBROADCAST vpbroadcastb
+# define VPCMPEQ vpcmpeqb
+# define VPCMPNE vpcmpneqb
+# define VPMINU vpminub
+# endif
+
+# define PAGE_SIZE 4096
+# define CHAR_PER_VEC (VEC_SIZE / CHAR_SIZE)
+
+# define XMMZERO xmm16
+# if VEC_SIZE == 64
+# define KMOV kmovq
+# define KOR korq
+# define NOT notq
+# define KORTEST kortestq
+# define VRAX rax
+# define VRCX rcx
+# define SHR shrq
+# define SARX sarxq
+# define TEXTSUFFIX evex512
+# define VMMZERO zmm16
+# define VMMMATCH zmm17
+# define VMM1 zmm18
+# define VMM2 zmm19
+# define VMM3 zmm20
+# elif VEC_SIZE == 32
+/* Currently Unused. */
+# define KMOV kmovd
+# define KOR kord
+# define NOT notl
+# define KORTEST kortestd
+# define VRAX eax
+# define VRCX ecx
+# define SHR shrl
+# define SARX sarxl
+# define TEXTSUFFIX evex256
+# define VMMZERO ymm16
+# define VMMMATCH ymm17
+# define VMM1 ymm18
+# define VMM2 ymm19
+# define VMM3 ymm20
+# endif
+
+ .section .text.TEXTSUFFIX, "ax", @progbits
+/* Aligning the entry point to 64 bytes gives better performance for
+   inputs that fit in a single vector.  */
+ENTRY_P2ALIGN (MEMCHR, 6)
+# ifndef USE_AS_RAWMEMCHR
+ /* Check for zero length. */
+ test %RDX_LP, %RDX_LP
+ jz L(zero)
+
+# ifdef __ILP32__
+ /* Clear the upper 32 bits. */
+ movl %edx, %edx
+# endif
+# endif
+
+ /* Broadcast CHAR to VMMMATCH. */
+ VPBROADCAST %esi, %VMMMATCH
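+	/* If an unaligned VEC_SIZE load from RDI would extend past the
+	   end of the page, it could fault on the following page, so
+	   branch to the page-cross path below.  */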
+ movl %edi, %eax
+ andl $(PAGE_SIZE - 1), %eax
+ cmpl $(PAGE_SIZE - VEC_SIZE), %eax
+ ja L(page_cross)
+
+	/* Compare each [w]char with the target CHAR; the mask bit is
+	   set for every match.  */
+ VPCMPEQ (%rdi), %VMMMATCH, %k0
+
+ KMOV %k0, %VRAX
+# ifndef USE_AS_RAWMEMCHR
+ bsf %VRAX, %VRCX
+ jz L(align_more)
+ xor %eax, %eax
+# ifdef USE_AS_WMEMCHR
+ leaq (%rdi, %rcx, CHAR_SIZE), %rdi
+# else
+ addq %rcx, %rdi
+# endif
+ cmp %rcx, %rdx
+ cmova %rdi, %rax
+# else
+ bsf %VRAX, %VRAX
+ jz L(align_more)
+ add %rdi, %rax
+# endif
+ ret
+
+# ifndef USE_AS_RAWMEMCHR
+L(zero):
+ xorl %eax, %eax
+ ret
+# endif
+
+ .p2align 5,,5
+L(page_cross):
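+	/* Load the whole aligned vector that contains the start
+	   pointer, then shift the match mask right by the misalignment
+	   so that bit 0 corresponds to the first character of the
+	   buffer.  */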
+ movq %rdi, %rcx
+ andq $-VEC_SIZE, %rcx
+
+	VPCMPEQ	(%rcx), %VMMMATCH, %k0
+	KMOV	%k0, %VRCX
+# ifdef USE_AS_WMEMCHR
+	/* The mask has one bit per wchar, so convert the byte
+	   misalignment in EAX into a wchar count before shifting.  */
+	andl	$(VEC_SIZE - 1), %eax
+	shrl	$2, %eax
+# endif
+	SARX	%VRAX, %VRCX, %VRAX
+# ifndef USE_AS_RAWMEMCHR
+ bsf %VRAX, %VRCX
+ jz L(align_more)
+ xor %eax, %eax
+# ifdef USE_AS_WMEMCHR
+ leaq (%rdi, %rcx, CHAR_SIZE), %rdi
+# else
+ addq %rcx, %rdi
+# endif
+ cmp %rcx, %rdx
+ cmova %rdi, %rax
+
+# else
+ bsf %rax, %rax
+ jz L(align_more)
+ add %rdi, %rax
+# endif
+ ret
+
+L(ret_vec_x2):
+ subq $-VEC_SIZE, %rdi
+L(ret_vec_x1):
+ bsf %VRAX, %VRAX
+# ifndef USE_AS_RAWMEMCHR
+ jz L(zero)
+ cmp %rax, %rdx
+ jbe L(zero)
+# endif
+# ifdef USE_AS_WMEMCHR
+ leaq (%rdi, %rax, CHAR_SIZE), %rax
+# else
+ add %rdi, %rax
+# endif
+ ret
+
+ .p2align 5,,10
+L(align_more):
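+	/* For memchr/wmemchr, negate the start pointer so that, once
+	   RDI has been advanced and aligned below, RAX + RDI gives the
+	   number of bytes skipped; that count (converted to [w]chars)
+	   is then subtracted from the remaining length.  */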
+# ifndef USE_AS_RAWMEMCHR
+ xor %eax, %eax
+ subq %rdi, %rax
+# endif
+
+ subq $-VEC_SIZE, %rdi
+ /* Align rdi to VEC_SIZE. */
+ andq $-VEC_SIZE, %rdi
+
+# ifndef USE_AS_RAWMEMCHR
+ addq %rdi, %rax
+# ifdef USE_AS_WMEMCHR
+ sarl $2, %eax
+# endif
+ subq %rax, %rdx
+ jbe L(zero)
+# endif
+
+	/* Check the next four vectors individually before entering the
+	   4-vector loop.  */
+ VPCMPEQ (%rdi), %VMMMATCH, %k0
+
+ KMOV %k0, %VRAX
+ test %VRAX, %VRAX
+ jnz L(ret_vec_x1)
+
+# ifndef USE_AS_RAWMEMCHR
+ subq $CHAR_PER_VEC, %rdx
+ jbe L(zero)
+# endif
+
+ VPCMPEQ VEC_SIZE(%rdi), %VMMMATCH, %k0
+
+ KMOV %k0, %VRAX
+ test %VRAX, %VRAX
+ jnz L(ret_vec_x2)
+
+# ifndef USE_AS_RAWMEMCHR
+ subq $CHAR_PER_VEC, %rdx
+ jbe L(zero)
+# endif
+
+ VPCMPEQ (VEC_SIZE * 2)(%rdi), %VMMMATCH, %k0
+
+ KMOV %k0, %VRAX
+ test %VRAX, %VRAX
+ jnz L(ret_vec_x3)
+
+# ifndef USE_AS_RAWMEMCHR
+ subq $CHAR_PER_VEC, %rdx
+ jbe L(zero)
+# endif
+
+ VPCMPEQ (VEC_SIZE * 3)(%rdi), %VMMMATCH, %k0
+
+ KMOV %k0, %VRAX
+ test %VRAX, %VRAX
+ jnz L(ret_vec_x4)
+
+# ifndef USE_AS_RAWMEMCHR
+ subq $CHAR_PER_VEC, %rdx
+ jbe L(zero)
+ /* Save pointer to find alignment adjustment. */
+ movq %rdi, %rax
+# endif
+ /* Align address to VEC_SIZE * 4 for loop. */
+ andq $-(VEC_SIZE * 4), %rdi
+
+ /* Add alignment difference to rdx. */
+# ifndef USE_AS_RAWMEMCHR
+ subq %rdi, %rax
+# ifdef USE_AS_WMEMCHR
+ SHR $2, %VRAX
+# endif
+ addq %rax, %rdx
+# endif
+ vpxorq %XMMZERO, %XMMZERO, %XMMZERO
+
+ /* 4 vector loop. */
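+	/* Each iteration checks four vectors.  VPCMPNE sets k1 where
+	   the first vector does NOT match, and the XORs leave a zero
+	   [w]char wherever the second or third vector matches.  VPMINU
+	   with zero-masking by k1 therefore produces a zero element
+	   wherever any of the first three vectors matches, which k2
+	   picks up; k3 covers the fourth vector, so a single KORTEST
+	   tests all four.  */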
+ .p2align 5,,11
+L(loop):
+
+ VPCMPNE (VEC_SIZE * 4)(%rdi), %VMMMATCH, %k1
+ vpxorq (VEC_SIZE * 5)(%rdi), %VMMMATCH, %VMM1
+ vpxorq (VEC_SIZE * 6)(%rdi), %VMMMATCH, %VMM2
+ VPCMPEQ (VEC_SIZE * 7)(%rdi), %VMMMATCH, %k3
+ VPMINU %VMM1, %VMM2, %VMM3{%k1}{z}
+ VPCMPEQ %VMM3, %VMMZERO, %k2
+
+ subq $-(VEC_SIZE * 4), %rdi
+ KORTEST %k2, %k3
+# ifdef USE_AS_RAWMEMCHR
+ jz L(loop)
+# else
+ jnz L(loopend)
+ subq $(CHAR_PER_VEC * 4), %rdx
+ ja L(loop)
+ xor %eax, %eax
+ ret
+# endif
+
+L(loopend):
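+	/* A match was found in one of the four vectors just checked;
+	   rescan them one at a time, re-validating the remaining length
+	   for memchr/wmemchr, to locate it.  */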
+ VPCMPEQ (%rdi), %VMMMATCH, %k1
+ KMOV %k1, %VRAX
+ test %VRAX, %VRAX
+ jnz L(ret_vec_x1)
+
+# ifndef USE_AS_RAWMEMCHR
+ subq $CHAR_PER_VEC, %rdx
+ jbe L(zero)
+# endif
+
+ VPCMPEQ VEC_SIZE(%rdi), %VMMMATCH, %k1
+ KMOV %k1, %VRAX
+ test %VRAX, %VRAX
+ jnz L(ret_vec_x2)
+
+# ifndef USE_AS_RAWMEMCHR
+ subq $CHAR_PER_VEC, %rdx
+ jbe L(zero)
+# endif
+
+ VPCMPEQ (VEC_SIZE * 2)(%rdi), %VMMMATCH, %k1
+ KMOV %k1, %VRAX
+ test %VRAX, %VRAX
+ jnz L(ret_vec_x3)
+
+# ifndef USE_AS_RAWMEMCHR
+ subq $CHAR_PER_VEC, %rdx
+ jbe L(zero)
+# endif
+
+	/* At this point the matching [w]char must be in the fourth
+	   vector, so reuse the mask already computed in k3 instead of
+	   comparing again.  */
+ KMOV %k3, %VRAX
+
+L(ret_vec_x4):
+ bsf %VRAX, %VRAX
+# ifndef USE_AS_RAWMEMCHR
+ cmp %rax, %rdx
+ jbe L(zero)
+# endif
+ leaq (VEC_SIZE * 3)(%rdi, %rax, CHAR_SIZE), %rax
+ ret
+
+ .p2align 5,,5
+L(ret_vec_x3):
+ bsf %VRAX, %VRAX
+# ifndef USE_AS_RAWMEMCHR
+ cmp %rax, %rdx
+ jbe L(zero)
+# endif
+ leaq (VEC_SIZE * 2)(%rdi, %rax, CHAR_SIZE), %rax
+ ret
+
+END (MEMCHR)
+#endif
new file mode 100644
@@ -0,0 +1,7 @@
+#ifndef MEMCHR
+# define MEMCHR __memchr_evex512
+#endif
+
+#define VEC_SIZE 64
+
+#include "memchr-evex-base.S"
new file mode 100644
@@ -0,0 +1,7 @@
+#ifndef RAWMEMCHR
+# define RAWMEMCHR __rawmemchr_evex512
+#endif
+#define USE_AS_RAWMEMCHR 1
+#define MEMCHR RAWMEMCHR
+
+#include "memchr-evex512.S"
new file mode 100644
@@ -0,0 +1,8 @@
+#ifndef WMEMCHR
+# define WMEMCHR __wmemchr_evex512
+#endif
+
+#define MEMCHR WMEMCHR
+#define USE_AS_WMEMCHR 1
+
+#include "memchr-evex512.S"