Patchwork x86_64: memset optimized with AVX512

login
register
mail settings
Submitter Andrew Senkevich
Date Dec. 3, 2015, 8:46 p.m.
Message ID <CAMXFM3v7K1bO8aUB=rVBzKqAZJuNrMBh5NuyszONtB-sZrXcgA@mail.gmail.com>
Download mbox | patch
Permalink /patch/9873/
State New
Headers show

Comments

Andrew Senkevich - Dec. 3, 2015, 8:46 p.m.
Hi,

here is AVX512 memset implementation.
It is significantly faster than the AVX2 memset on KNL hardware;
performance results are attached.

2015-12-03  Andrew Senkevich  <andrew.senkevich@intel.com>

        * sysdeps/x86_64/multiarch/Makefile: Added new file.
        * sysdeps/x86_64/multiarch/ifunc-impl-list.c: Added test.
        * sysdeps/x86_64/multiarch/memset-avx512.S: New file.
        * sysdeps/x86_64/multiarch/memset.S: Added new IFUNC branch.
        * sysdeps/x86_64/multiarch/memset-avx2.S: Added libc_hidden_def for
        __memset_avx2.



--
WBR,
Andrew
H.J. Lu - Dec. 3, 2015, 9:21 p.m.
On Thu, Dec 3, 2015 at 12:46 PM, Andrew Senkevich
<andrew.n.senkevich@gmail.com> wrote:
> Hi,
>
> here is AVX512 memset implementation.
> It is faster enough than AVX2 memset on KNL hardware, performance
> results attached.
>
> 2015-12-03  Andrew Senkevich  <andrew.senkevich@intel.com>
>
>         * sysdeps/x86_64/multiarch/Makefile: Added new file.
>         * sysdeps/x86_64/multiarch/ifunc-impl-list.c: Added test.
>         * sysdeps/x86_64/multiarch/memset-avx512.S: New file.
>         * sysdeps/x86_64/multiarch/memset.S: Added new IFUNC branch.
>         * sysdeps/x86_64/multiarch/memset-avx2.S: Added libc_hidden_def for
>         __memset_avx2.
>
> diff --git a/sysdeps/x86_64/multiarch/Makefile
> b/sysdeps/x86_64/multiarch/Makefile
> index bb811c2..5bb859e 100644
> --- a/sysdeps/x86_64/multiarch/Makefile
> +++ b/sysdeps/x86_64/multiarch/Makefile
> @@ -18,7 +18,7 @@ sysdep_routines += strncat-c stpncpy-c strncpy-c
> strcmp-ssse3 \
>                    stpcpy-sse2-unaligned stpncpy-sse2-unaligned \
>                    strcat-sse2-unaligned strncat-sse2-unaligned \
>                    strchr-sse2-no-bsf memcmp-ssse3 strstr-sse2-unaligned \
> -                  strcspn-c strpbrk-c strspn-c varshift memset-avx2
> +                  strcspn-c strpbrk-c strspn-c varshift memset-avx2 memset-avx512
>  CFLAGS-varshift.c += -msse4
>  CFLAGS-strcspn-c.c += -msse4
>  CFLAGS-strpbrk-c.c += -msse4
> diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
> b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
> index 5c0c219..a8be3eb 100644
> --- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
> +++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
> @@ -83,7 +83,10 @@ __libc_ifunc_impl_list (const char *name, struct
> libc_ifunc_impl *array,
>               IFUNC_IMPL_ADD (array, i, memset, 1, __memset_sse2)
>               IFUNC_IMPL_ADD (array, i, memset,
>                               HAS_ARCH_FEATURE (AVX2_Usable),
> -                             __memset_avx2))
> +                             __memset_avx2)
> +             IFUNC_IMPL_ADD (array, i, memset,
> +                             HAS_ARCH_FEATURE (AVX512F_Usable),
> +                             __memset_avx512))
>
>    /* Support sysdeps/x86_64/multiarch/stpncpy.S.  */
>    IFUNC_IMPL (i, name, stpncpy,
> diff --git a/sysdeps/x86_64/multiarch/memset-avx2.S
> b/sysdeps/x86_64/multiarch/memset-avx2.S
> index 28eabad..f96baf3 100644
> --- a/sysdeps/x86_64/multiarch/memset-avx2.S
> +++ b/sysdeps/x86_64/multiarch/memset-avx2.S
> @@ -165,4 +165,6 @@ L(gobble_data):
>         ret
>
>  END (MEMSET)
> +libc_hidden_def (__memset_avx2)

Why is this change needed?  If it is needed, please submit
a separate patch.

> +
>  #endif
> diff --git a/sysdeps/x86_64/multiarch/memset-avx512.S
> b/sysdeps/x86_64/multiarch/memset-avx512.S
> new file mode 100644
> index 0000000..083177e
> --- /dev/null
> +++ b/sysdeps/x86_64/multiarch/memset-avx512.S
> @@ -0,0 +1,191 @@
> +/* memset optimized with AVX512.
> +   Copyright (C) 2015 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <http://www.gnu.org/licenses/>.  */
> +
> +#include <sysdep.h>
> +
> +#if IS_IN (libc)
> +
> +#include "asm-syntax.h"
> +#ifndef MEMSET
> +# define MEMSET        __memset_avx512
> +#endif
> +

Should __memset_chk_avx512 also be provided?
Richard Henderson - Dec. 4, 2015, 1:57 p.m.
On 12/03/2015 12:46 PM, Andrew Senkevich wrote:
> +       .section .text,"ax",@progbits
> +ENTRY (MEMSET)
> +#ifdef HAVE_AVX512_ASM_SUPPORT

Surely you'd just wrap the entire function with this ifdef,
so that you don't produce the __memset_avx512 symbol at all.


> +       vpxor   %xmm0, %xmm0, %xmm0
> +       vmovd   %esi, %xmm1
...
> +       vpshufb %xmm0, %xmm1, %xmm0
...
> +       vbroadcastss    %xmm0, %zmm2

Surely vpbroadcastb %esi, %{x,z}mm2

Do you need a vzeroupper on the exit paths that use the zmm/ymm register?
We've got them in the avx2 memset...


r~
Richard Henderson - Dec. 4, 2015, 8:29 p.m.
On 12/04/2015 05:57 AM, Richard Henderson wrote:
> Surely vpbroadcastb %esi, %{x,z}mm2

Ho hum.  I forgot that avx512 got split into 6 pieces.  Nevermind this bit.


r~

Patch

diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
index bb811c2..5bb859e 100644
--- a/sysdeps/x86_64/multiarch/Makefile
+++ b/sysdeps/x86_64/multiarch/Makefile
@@ -18,7 +18,7 @@ sysdep_routines += strncat-c stpncpy-c strncpy-c strcmp-ssse3 \
                   stpcpy-sse2-unaligned stpncpy-sse2-unaligned \
                   strcat-sse2-unaligned strncat-sse2-unaligned \
                   strchr-sse2-no-bsf memcmp-ssse3 strstr-sse2-unaligned \
-                  strcspn-c strpbrk-c strspn-c varshift memset-avx2
+                  strcspn-c strpbrk-c strspn-c varshift memset-avx2 memset-avx512
 CFLAGS-varshift.c += -msse4
 CFLAGS-strcspn-c.c += -msse4
 CFLAGS-strpbrk-c.c += -msse4
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index 5c0c219..a8be3eb 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -83,7 +83,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
              IFUNC_IMPL_ADD (array, i, memset, 1, __memset_sse2)
              IFUNC_IMPL_ADD (array, i, memset,
                              HAS_ARCH_FEATURE (AVX2_Usable),
-                             __memset_avx2))
+                             __memset_avx2)
+             IFUNC_IMPL_ADD (array, i, memset,
+                             HAS_ARCH_FEATURE (AVX512F_Usable),
+                             __memset_avx512))

   /* Support sysdeps/x86_64/multiarch/stpncpy.S.  */
   IFUNC_IMPL (i, name, stpncpy,
diff --git a/sysdeps/x86_64/multiarch/memset-avx2.S b/sysdeps/x86_64/multiarch/memset-avx2.S
index 28eabad..f96baf3 100644
--- a/sysdeps/x86_64/multiarch/memset-avx2.S
+++ b/sysdeps/x86_64/multiarch/memset-avx2.S
@@ -165,4 +165,6 @@  L(gobble_data):
        ret

 END (MEMSET)
+libc_hidden_def (__memset_avx2)
+
 #endif
diff --git a/sysdeps/x86_64/multiarch/memset-avx512.S b/sysdeps/x86_64/multiarch/memset-avx512.S
new file mode 100644
index 0000000..083177e
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/memset-avx512.S
@@ -0,0 +1,191 @@ 
+/* memset optimized with AVX512.
+   Copyright (C) 2015 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#if IS_IN (libc)
+
+#include "asm-syntax.h"
+#ifndef MEMSET
+# define MEMSET        __memset_avx512
+#endif
+
+       .section .text,"ax",@progbits
+ENTRY (MEMSET)
+#ifdef HAVE_AVX512_ASM_SUPPORT
+       vpxor   %xmm0, %xmm0, %xmm0
+       vmovd   %esi, %xmm1
+       lea     (%rdi, %rdx), %rsi
+       mov     %rdi, %rax
+       vpshufb %xmm0, %xmm1, %xmm0
+       cmp     $16, %rdx
+       jb      L(less_16bytes)
+       cmp     $512, %rdx
+       vbroadcastss    %xmm0, %zmm2
+       ja      L(512bytesormore)
+       cmp     $256, %rdx
+       jb      L(less_256bytes)
+       vmovups %zmm2, (%rdi)
+       vmovups %zmm2, 0x40(%rdi)
+       vmovups %zmm2, 0x80(%rdi)
+       vmovups %zmm2, 0xC0(%rdi)
+       vmovups %zmm2, -0x100(%rsi)
+       vmovups %zmm2, -0xC0(%rsi)
+       vmovups %zmm2, -0x80(%rsi)
+       vmovups %zmm2, -0x40(%rsi)
+       ret
+
+L(less_256bytes):
+       cmp     $128, %dl
+       jb      L(less_128bytes)
+       vmovups %zmm2, (%rdi)
+       vmovups %zmm2, 0x40(%rdi)
+       vmovups %zmm2, -0x80(%rsi)
+       vmovups %zmm2, -0x40(%rsi)
+       ret
+
+L(less_128bytes):
+       cmp     $64, %dl
+       jb      L(less_64bytes)
+       vmovups %zmm2, (%rdi)
+       vmovups %zmm2, -0x40(%rsi)
+       ret
+
+L(less_64bytes):
+       cmp     $32, %dl
+       jb      L(less_32bytes)
+       vmovdqu %ymm2, (%rdi)
+       vmovdqu %ymm2, -0x20(%rsi)
+       ret
+
+L(less_32bytes):
+       vmovdqu %xmm0, (%rdi)
+       vmovdqu %xmm0, -0x10(%rsi)
+       ret
+
+L(less_16bytes):
+       cmp     $8, %dl
+       jb      L(less_8bytes)
+       vmovq   %xmm0, (%rdi)
+       vmovq   %xmm0, -0x08(%rsi)
+       ret
+
+L(less_8bytes):
+       vmovd   %xmm0, %ecx
+       cmp     $4, %dl
+       jb      L(less_4bytes)
+       mov     %ecx, (%rdi)
+       mov     %ecx, -0x04(%rsi)
+       ret
+
+L(less_4bytes):
+       cmp     $2, %dl
+       jb      L(less_2bytes)
+       mov     %cx, (%rdi)
+       mov     %cx, -0x02(%rsi)
+       ret
+
+L(less_2bytes):
+       cmp     $1, %dl
+       jb      L(less_1bytes)
+       mov     %cl, (%rdi)
+L(less_1bytes):
+       ret
+
+L(512bytesormore):
+       mov     __x86_shared_cache_size_half(%rip), %rcx
+       cmp     %rcx, %rdx
+       ja      L(preloop_large)
+       cmp     $1024, %rdx
+       ja      L(1024bytesormore)
+
+       vmovups %zmm2, (%rdi)
+       vmovups %zmm2, 0x40(%rdi)
+       vmovups %zmm2, 0x80(%rdi)
+       vmovups %zmm2, 0xC0(%rdi)
+       vmovups %zmm2, 0x100(%rdi)
+       vmovups %zmm2, 0x140(%rdi)
+       vmovups %zmm2, 0x180(%rdi)
+       vmovups %zmm2, 0x1C0(%rdi)
+       vmovups %zmm2, -0x200(%rsi)
+       vmovups %zmm2, -0x1C0(%rsi)
+       vmovups %zmm2, -0x180(%rsi)
+       vmovups %zmm2, -0x140(%rsi)
+       vmovups %zmm2, -0x100(%rsi)
+       vmovups %zmm2, -0xC0(%rsi)
+       vmovups %zmm2, -0x80(%rsi)
+       vmovups %zmm2, -0x40(%rsi)
+       ret
+
+/* Align on 64 and loop with aligned stores.  */
+L(1024bytesormore):
+       sub     $0x100, %rsi
+       vmovups %zmm2, (%rax)
+       and     $-0x40, %rdi
+       add     $0x40, %rdi
+
+L(gobble_256bytes_loop):
+       vmovaps %zmm2, (%rdi)
+       vmovaps %zmm2, 0x40(%rdi)
+       vmovaps %zmm2, 0x80(%rdi)
+       vmovaps %zmm2, 0xC0(%rdi)
+       add     $0x100, %rdi
+       cmp     %rsi, %rdi
+       jb      L(gobble_256bytes_loop)
+       vmovups %zmm2, (%rsi)
+       vmovups %zmm2, 0x40(%rsi)
+       vmovups %zmm2, 0x80(%rsi)
+       vmovups %zmm2, 0xC0(%rsi)
+       ret
+
+/* Align on 128 and loop with non-temporal stores.  */
+L(preloop_large):
+       and     $-0x80, %rdi
+       add     $0x80, %rdi
+       vmovups %zmm2, (%rax)
+       vmovups %zmm2, 0x40(%rax)
+       sub     $0x200, %rsi
+
+L(gobble_512bytes_nt_loop):
+       vmovntdq %zmm2, (%rdi)
+       vmovntdq %zmm2, 0x40(%rdi)
+       vmovntdq %zmm2, 0x80(%rdi)
+       vmovntdq %zmm2, 0xC0(%rdi)
+       vmovntdq %zmm2, 0x100(%rdi)
+       vmovntdq %zmm2, 0x140(%rdi)
+       vmovntdq %zmm2, 0x180(%rdi)
+       vmovntdq %zmm2, 0x1C0(%rdi)
+       add     $0x200, %rdi
+       cmp     %rsi, %rdi
+       jb      L(gobble_512bytes_nt_loop)
+       sfence
+       vmovups %zmm2, (%rsi)
+       vmovups %zmm2, 0x40(%rsi)
+       vmovups %zmm2, 0x80(%rsi)
+       vmovups %zmm2, 0xC0(%rsi)
+       vmovups %zmm2, 0x100(%rsi)
+       vmovups %zmm2, 0x140(%rsi)
+       vmovups %zmm2, 0x180(%rsi)
+       vmovups %zmm2, 0x1C0(%rsi)
+       ret
+#else
+       call    HIDDEN_JUMPTARGET(__memset_avx2)
+       ret
+#endif
+END (MEMSET)
+#endif
diff --git a/sysdeps/x86_64/multiarch/memset.S b/sysdeps/x86_64/multiarch/memset.S
index dbc00d2..0c8c176 100644
--- a/sysdeps/x86_64/multiarch/memset.S
+++ b/sysdeps/x86_64/multiarch/memset.S
@@ -30,6 +30,11 @@  ENTRY(memset)
        HAS_ARCH_FEATURE (AVX2_Usable)
        jz      2f
        leaq    __memset_avx2(%rip), %rax
+#ifdef HAVE_AVX512_ASM_SUPPORT
+       HAS_ARCH_FEATURE (AVX512F_Usable)
+       jz      2f
+       leaq    __memset_avx512(%rip), %rax
+#endif
 2:     ret
 END(memset)
 #endif