powerpc64le: Optimize memcpy for POWER10

Message ID 20210428023138.795316-1-tuliom@linux.ibm.com
State Superseded
Series: powerpc64le: Optimize memcpy for POWER10

Commit Message

Tulio Magno Quites Machado Filho April 28, 2021, 2:31 a.m. UTC
This implementation is based on __memcpy_power8_cached and integrates
suggestions from Anton Blanchard.
It takes advantage of the load and store with length instructions
(lxvl/stxvl) for short copies and for the tail code, which simplifies the
implementation.

All unaligned memory accesses use instructions that do not generate
alignment interrupts on POWER10, making it safe to use on
caching-inhibited memory.

The main loop has also been modified in order to increase instruction
throughput by reducing the dependency on updates from previous iterations.

On average, this implementation is around 30% faster than __memcpy_power7
and around 10% faster than __memcpy_power8_cached.
---
 sysdeps/powerpc/powerpc64/le/power10/memcpy.S | 198 ++++++++++++++++++
 sysdeps/powerpc/powerpc64/multiarch/Makefile  |   3 +-
 .../powerpc64/multiarch/ifunc-impl-list.c     |   6 +
 .../powerpc64/multiarch/memcpy-power10.S      |  26 +++
 sysdeps/powerpc/powerpc64/multiarch/memcpy.c  |   7 +
 5 files changed, 239 insertions(+), 1 deletion(-)
 create mode 100644 sysdeps/powerpc/powerpc64/le/power10/memcpy.S
 create mode 100644 sysdeps/powerpc/powerpc64/multiarch/memcpy-power10.S
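
As background on the load/store-with-length instructions the patch relies
on: lxvl/stxvl take the byte count in the most significant byte of the
length GPR, which is why the code shifts len left by 56 before using it.
A minimal C sketch of the <=16-byte fast path, using the vec_xl_len and
vec_xst_len intrinsics that compile to these instructions (illustrative
only; the function name is hypothetical and this is not the patch's code):

    #include <altivec.h>
    #include <stddef.h>

    /* Copy len (<= 16) bytes with one load/store pair, like the assembly
       fast path.  Requires GCC or Clang with -mcpu=power9 or later.  */
    static void *
    short_copy (void *dst, const void *src, size_t len)
    {
      vector unsigned char v = vec_xl_len ((unsigned char *) src, len);
      vec_xst_len (v, (unsigned char *) dst, len);
      return dst;
    }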
  

Comments

Raphael M Zinsly April 28, 2021, 1:31 p.m. UTC | #1
This patch LGTM, thanks!

  
Lucas A. M. Magalhaes April 28, 2021, 2:22 p.m. UTC | #2
Hi Tulio,
LGTM, all tests pass.
I have just one suggested change.

Quoting Tulio Magno Quites Machado Filho via Libc-alpha (2021-04-27 23:31:38)
> This implementation is based on __memcpy_power8_cached and integrates
> suggestions from Anton Blanchard.
> It takes advantage of the load and store with length instructions
> (lxvl/stxvl) for short copies and for the tail code, which simplifies the
> implementation.
> 
> All unaligned memory accesses use instructions that do not generate
> alignment interrupts on POWER10, making it safe to use on
> caching-inhibited memory.
> 
> The main loop has also been modified in order to increase instruction
> throughput by reducing the dependency on updates from previous iterations.
> 
> On average, this implementation is around 30% faster than __memcpy_power7
> and around 10% faster than __memcpy_power8_cached.
> ---
>  sysdeps/powerpc/powerpc64/le/power10/memcpy.S | 198 ++++++++++++++++++
>  sysdeps/powerpc/powerpc64/multiarch/Makefile  |   3 +-
>  .../powerpc64/multiarch/ifunc-impl-list.c     |   6 +
>  .../powerpc64/multiarch/memcpy-power10.S      |  26 +++
>  sysdeps/powerpc/powerpc64/multiarch/memcpy.c  |   7 +
>  5 files changed, 239 insertions(+), 1 deletion(-)
>  create mode 100644 sysdeps/powerpc/powerpc64/le/power10/memcpy.S
>  create mode 100644 sysdeps/powerpc/powerpc64/multiarch/memcpy-power10.S
> 
> diff --git a/sysdeps/powerpc/powerpc64/le/power10/memcpy.S b/sysdeps/powerpc/powerpc64/le/power10/memcpy.S
> new file mode 100644
> index 0000000000..f84acabec5
> --- /dev/null
> +++ b/sysdeps/powerpc/powerpc64/le/power10/memcpy.S
> @@ -0,0 +1,198 @@
> +/* Optimized memcpy implementation for POWER10.
> +   Copyright (C) 2021 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <http://www.gnu.org/licenses/>.  */
> +
> +#include <sysdep.h>
> +
> +
> +#ifndef MEMCPY
> +# define MEMCPY memcpy
> +#endif
> +
> +/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
> +          Returns 'dst'.  */
> +
> +       .machine power9
> +ENTRY_TOCLESS (MEMCPY, 5)
> +       CALL_MCOUNT 3
> +
> +       /* Copy up to 16 bytes.  */
> +       sldi    r6,r5,56        /* Prepare [l|st]xvl counter.  */
> +       lxvl    v10,r4,r6
> +       stxvl   v10,r3,r6
> +       subic.  r6,r5,16        /* Return if len <= 16.  */
> +       blelr
> +
> +       /* If len >= 256, assume nothing got copied before and copy
> +          again.  This might cause issues with overlapped memory, but memcpy
> +          is not expected to treat overlapped memory.  */
> +       cmpdi   r5,256
> +       bge     L(copy_ge_256)
> +       /* 16 < len < 256 and the first 16 bytes have already been copied.  */
> +       addi    r10,r3,16       /* Keep r3 intact as return value.  */
> +       addi    r4,r4,16
> +       subic   r5,r5,16
> +       b       L(copy_lt_256)  /* Avoid the main loop if len < 256.  */
> +
Ok.
> +       .p2align 5
> +L(copy_ge_256):
> +       mr      r10,r3          /* Keep r3 intact as return value.  */
> +       /* Align dst to 16 bytes.  */
> +       andi.   r9,r10,0xf
> +       beq     L(dst_is_align_16)
> +       lxv     v10,0(r4)
> +       subfic  r12,r9,16
> +       subf    r5,r12,r5
> +       add     r4,r4,r12
> +       stxv    v10,0(r10)
> +       add     r10,r10,r12
You could use r3 in the previous instructions here and only set up r10 at
this point.
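
For illustration, one possible reading of that suggestion (an untested
sketch, not part of the patch):

	L(copy_ge_256):
		andi.	r9,r3,0xf	/* Test alignment using r3 directly.  */
		mr	r10,r3		/* Only the aligned path needs this.  */
		beq	L(dst_is_align_16)
		lxv	v10,0(r4)
		subfic	r12,r9,16
		subf	r5,r12,r5
		add	r4,r4,r12
		stxv	v10,0(r3)	/* Store through r3...  */
		add	r10,r3,r12	/* ...and set up r10 only here.  */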
> +
> +L(dst_is_align_16):
> +       srdi    r9,r5,7         /* Divide by 128.  */
> +       mtctr   r9
> +       addi    r6,r4,64
> +       addi    r7,r10,64
> +
Ok.

> +
> +       /* Main loop, copy 128 bytes per iteration.
> +          Use r6=src+64 and r7=dest+64 in order to reduce the dependency on
> +          r4 and r10.  */
> +       .p2align 5
> +L(copy_128):
> +
> +       lxv     v10, 0(r4)
> +       lxv     v11, 16(r4)
> +       lxv     v12, 32(r4)
> +       lxv     v13, 48(r4)
> +
> +       addi    r4,r4,128
> +
> +       stxv    v10, 0(r10)
> +       stxv    v11, 16(r10)
> +       stxv    v12, 32(r10)
> +       stxv    v13, 48(r10)
> +
> +       addi    r10,r10,128
> +
> +       lxv     v10, 0(r6)
> +       lxv     v11, 16(r6)
> +       lxv     v12, 32(r6)
> +       lxv     v13, 48(r6)
> +
> +       addi    r6,r6,128
> +
> +       stxv    v10, 0(r7)
> +       stxv    v11, 16(r7)
> +       stxv    v12, 32(r7)
> +       stxv    v13, 48(r7)
> +
> +       addi    r7,r7,128
> +
> +       bdnz    L(copy_128)
> +
> +       clrldi. r5,r5,64-7      /* Have we copied everything?  */
> +       beqlr
> +
Ok.

> +       .p2align 5
> +L(copy_lt_256):
> +       cmpdi   r5,16
> +       ble     L(copy_le_16)
> +       srdi.   r9,r5,5         /* Divide by 32.  */
> +       beq     L(copy_lt_32)
> +       mtctr   r9
> +       /* Use r6=src+32, r7=dest+32, r8=src+64, r9=dest+64 in order to reduce
> +          the dependency on r4 and r10.  */
> +       addi    r6,r4,32
> +       addi    r7,r10,32
> +       addi    r8,r4,64
> +       addi    r9,r10,64
Ok.

> +
> +       .p2align 5
> +       /* Copy 32 bytes at a time, unaligned.
> +          The loop is unrolled 3 times in order to reduce the dependency on
> +          r4 and r10, copying up-to 96 bytes per iteration.  */
> +L(copy_32):
> +       lxv     v10, 0(r4)
> +       lxv     v11, 16(r4)
> +       stxv    v10, 0(r10)
> +       stxv    v11, 16(r10)
> +       bdz     L(end_copy_32a)
> +       addi    r4,r4,96
> +       addi    r10,r10,96
> +
> +       lxv     v10, 0(r6)
> +       lxv     v11, 16(r6)
> +       addi    r6,r6,96
> +       stxv    v10, 0(r7)
> +       stxv    v11, 16(r7)
> +       bdz     L(end_copy_32b)
> +       addi    r7,r7,96
> +
> +       lxv     v12, 0(r8)
> +       lxv     v13, 16(r8)
> +       addi    r8,r8,96
> +       stxv    v12, 0(r9)
> +       stxv    v13, 16(r9)
> +       addi    r9,r9,96
> +       bdnz    L(copy_32)
> +
> +       clrldi. r5,r5,64-5      /* Have we copied everything?  */
> +       beqlr
> +       cmpdi   r5,16
> +       ble     L(copy_le_16)
> +       b       L(copy_lt_32)
> +
Ok.
> +       .p2align 5
> +L(end_copy_32a):
> +       clrldi. r5,r5,64-5      /* Have we copied everything?  */
> +       beqlr
> +       /* 32 bytes have been copied since the last update of r4 and r10.  */
> +       addi    r4,r4,32
> +       addi    r10,r10,32
> +       cmpdi   r5,16
> +       ble     L(copy_le_16)
> +       b       L(copy_lt_32)
> +
Ok.

> +       .p2align 5
> +L(end_copy_32b):
> +       clrldi. r5,r5,64-5      /* Have we copied everything?  */
> +       beqlr
> +       /* The last iteration of the loop copied 64 bytes.  Update r4 and r10
> +          accordingly.  */
> +       addi    r4,r4,-32
> +       addi    r10,r10,-32
> +       cmpdi   r5,16
> +       ble     L(copy_le_16)
> +
Ok.

> +       .p2align 5
> +L(copy_lt_32):
> +       lxv     v10, 0(r4)
> +       stxv    v10, 0(r10)
> +       addi    r4,r4,16
> +       addi    r10,r10,16
> +       subic   r5,r5,16
> +
Ok.

> +       .p2align 5
> +L(copy_le_16):
> +       sldi    r6,r5,56
> +       lxvl    v10,r4,r6
> +       stxvl   v10,r10,r6
> +       blr
Ok.

> +
> +
> +END_GEN_TB (MEMCPY,TB_TOCLESS)
> +libc_hidden_builtin_def (memcpy)
> diff --git a/sysdeps/powerpc/powerpc64/multiarch/Makefile b/sysdeps/powerpc/powerpc64/multiarch/Makefile
> index 8aa46a3702..fdaa5ddb24 100644
> --- a/sysdeps/powerpc/powerpc64/multiarch/Makefile
> +++ b/sysdeps/powerpc/powerpc64/multiarch/Makefile
> @@ -1,5 +1,6 @@
>  ifeq ($(subdir),string)
> -sysdep_routines += memcpy-power8-cached memcpy-power7 memcpy-a2 memcpy-power6 \
> +sysdep_routines += memcpy-power10 \
> +                  memcpy-power8-cached memcpy-power7 memcpy-a2 memcpy-power6 \
>                    memcpy-cell memcpy-power4 memcpy-ppc64 \
>                    memcmp-power8 memcmp-power7 memcmp-power4 memcmp-ppc64 \
>                    memset-power7 memset-power6 memset-power4 \
> diff --git a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
> index 1a6993616f..7bb3028676 100644
> --- a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
> +++ b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
> @@ -51,6 +51,12 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
>  #ifdef SHARED
>    /* Support sysdeps/powerpc/powerpc64/multiarch/memcpy.c.  */
>    IFUNC_IMPL (i, name, memcpy,
> +#ifdef __LITTLE_ENDIAN__
> +             IFUNC_IMPL_ADD (array, i, memcpy,
> +                             hwcap2 & PPC_FEATURE2_ARCH_3_1
> +                             && hwcap & PPC_FEATURE_HAS_VSX,
> +                             __memcpy_power10)
> +#endif
>               IFUNC_IMPL_ADD (array, i, memcpy, hwcap2 & PPC_FEATURE2_ARCH_2_07,
>                               __memcpy_power8_cached)
>               IFUNC_IMPL_ADD (array, i, memcpy, hwcap & PPC_FEATURE_HAS_VSX,
Ok.

> diff --git a/sysdeps/powerpc/powerpc64/multiarch/memcpy-power10.S b/sysdeps/powerpc/powerpc64/multiarch/memcpy-power10.S
> new file mode 100644
> index 0000000000..70e0fc3ed6
> --- /dev/null
> +++ b/sysdeps/powerpc/powerpc64/multiarch/memcpy-power10.S
> @@ -0,0 +1,26 @@
> +/* Optimized memcpy implementation for POWER10.
> +   Copyright (C) 2021 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <https://www.gnu.org/licenses/>.  */
> +
> +#if defined __LITTLE_ENDIAN__ && IS_IN (libc)
> +#define MEMCPY __memcpy_power10
> +
> +#undef libc_hidden_builtin_def
> +#define libc_hidden_builtin_def(name)
> +
> +#include <sysdeps/powerpc/powerpc64/le/power10/memcpy.S>
> +#endif
> diff --git a/sysdeps/powerpc/powerpc64/multiarch/memcpy.c b/sysdeps/powerpc/powerpc64/multiarch/memcpy.c
> index 5733192932..53ab32ef26 100644
> --- a/sysdeps/powerpc/powerpc64/multiarch/memcpy.c
> +++ b/sysdeps/powerpc/powerpc64/multiarch/memcpy.c
> @@ -36,8 +36,15 @@ extern __typeof (__redirect_memcpy) __memcpy_power6 attribute_hidden;
>  extern __typeof (__redirect_memcpy) __memcpy_a2 attribute_hidden;
>  extern __typeof (__redirect_memcpy) __memcpy_power7 attribute_hidden;
>  extern __typeof (__redirect_memcpy) __memcpy_power8_cached attribute_hidden;
> +# if defined __LITTLE_ENDIAN__
> +extern __typeof (__redirect_memcpy) __memcpy_power10 attribute_hidden;
> +# endif
>  
>  libc_ifunc (__libc_memcpy,
> +# if defined __LITTLE_ENDIAN__
> +           (hwcap2 & PPC_FEATURE2_ARCH_3_1 && hwcap & PPC_FEATURE_HAS_VSX)
> +           ? __memcpy_power10 :
> +# endif
>             ((hwcap2 & PPC_FEATURE2_ARCH_2_07) && use_cached_memopt)
>             ? __memcpy_power8_cached :
>               (hwcap & PPC_FEATURE_HAS_VSX)
Ok.

---
Lucas A. M. Magalhães
  
Raoni Fassina Firmino April 28, 2021, 6:13 p.m. UTC | #3
Tested the patch built with --with-cpu=power10 on top of master
(commit b25b06749179), with no regressions relative to upstream.
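
(For reference, such a build might look like the following sketch; the
directory names and --prefix value are hypothetical, only
--with-cpu=power10 comes from the message above:)

    $ mkdir glibc-build && cd glibc-build
    $ ../glibc/configure --prefix=/usr --with-cpu=power10
    $ make && make check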

Following is my review.

On Tue, Apr 27, 2021 at 11:31:38PM -0300, Tulio Magno Quites Machado Filho via Libc-alpha wrote:
> This implementation is based on __memcpy_power8_cached and integrates
> suggestions from Anton Blanchard.
> It takes advantage of the load and store with length instructions
> (lxvl/stxvl) for short copies and for the tail code, which simplifies the
> implementation.
> 
> All unaligned memory accesses use instructions that do not generate
> alignment interrupts on POWER10, making it safe to use on
> caching-inhibited memory.
> 
> The main loop has also been modified in order to increase instruction
> throughput by reducing the dependency on updates from previous iterations.
> 
> On average, this implementation is around 30% faster than __memcpy_power7
> and around 10% faster than __memcpy_power8_cached.

ok.


> ---
>  sysdeps/powerpc/powerpc64/le/power10/memcpy.S | 198 ++++++++++++++++++
>  sysdeps/powerpc/powerpc64/multiarch/Makefile  |   3 +-
>  .../powerpc64/multiarch/ifunc-impl-list.c     |   6 +
>  .../powerpc64/multiarch/memcpy-power10.S      |  26 +++
>  sysdeps/powerpc/powerpc64/multiarch/memcpy.c  |   7 +
>  5 files changed, 239 insertions(+), 1 deletion(-)
>  create mode 100644 sysdeps/powerpc/powerpc64/le/power10/memcpy.S
>  create mode 100644 sysdeps/powerpc/powerpc64/multiarch/memcpy-power10.S
> 
> diff --git a/sysdeps/powerpc/powerpc64/le/power10/memcpy.S b/sysdeps/powerpc/powerpc64/le/power10/memcpy.S
> new file mode 100644
> index 0000000000..f84acabec5
> --- /dev/null
> +++ b/sysdeps/powerpc/powerpc64/le/power10/memcpy.S
> @@ -0,0 +1,198 @@
> +/* Optimized memcpy implementation for POWER10.
> +   Copyright (C) 2021 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <http://www.gnu.org/licenses/>.  */
> +
> +#include <sysdep.h>
> +
> +
> +#ifndef MEMCPY
> +# define MEMCPY memcpy
> +#endif
> +
> +/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
> +	   Returns 'dst'.  */
> +
> +	.machine power9
> +ENTRY_TOCLESS (MEMCPY, 5)
> +	CALL_MCOUNT 3
> +
> +	/* Copy up to 16 bytes.  */
> +	sldi	r6,r5,56	/* Prepare [l|st]xvl counter.  */
> +	lxvl	v10,r4,r6
> +	stxvl	v10,r3,r6
> +	subic.	r6,r5,16	/* Return if len <= 16.  */
> +	blelr
> +

ok.

Not sure if the subtract-carrying forms (sub*c) used here and in a few
places below are needed, but they don't hurt either.


> +	/* If len >= 256, assume nothing got copied before and copy
> +	   again.  This might cause issues with overlapped memory, but memcpy
> +	   is not expected to treat overlapped memory.  */
> +	cmpdi	r5,256
> +	bge	L(copy_ge_256)
> +	/* 16 < len < 256 and the first 16 bytes have already been copied.  */
> +	addi	r10,r3,16	/* Keep r3 intact as return value.  */
> +	addi	r4,r4,16
> +	subic	r5,r5,16
> +	b	L(copy_lt_256)	/* Avoid the main loop if len < 256.  */

ok.


> +
> +	.p2align 5
> +L(copy_ge_256):
> +	mr	r10,r3		/* Keep r3 intact as return value.  */
> +	/* Align dst to 16 bytes.  */
> +	andi.	r9,r10,0xf
> +	beq	L(dst_is_align_16)
> +	lxv	v10,0(r4)
> +	subfic	r12,r9,16
> +	subf	r5,r12,r5
> +	add	r4,r4,r12
> +	stxv	v10,0(r10)
> +	add	r10,r10,r12
> +

ok.


> +L(dst_is_align_16):
> +	srdi	r9,r5,7		/* Divide by 128.  */
> +	mtctr	r9
> +	addi	r6,r4,64
> +	addi	r7,r10,64
> +

ok.

Should this label also be aligned with ".p2align 5" the same as the
others?


> +
> +	/* Main loop, copy 128 bytes per iteration.
> +	   Use r6=src+64 and r7=dest+64 in order to reduce the dependency on
> +	   r4 and r10.  */
> +	.p2align 5
> +L(copy_128):
> +
> +	lxv	v10, 0(r4)
> +	lxv	v11, 16(r4)
> +	lxv	v12, 32(r4)
> +	lxv	v13, 48(r4)
> +
> +	addi	r4,r4,128
> +
> +	stxv	v10, 0(r10)
> +	stxv	v11, 16(r10)
> +	stxv	v12, 32(r10)
> +	stxv	v13, 48(r10)
> +
> +	addi	r10,r10,128
> +
> +	lxv	v10, 0(r6)
> +	lxv	v11, 16(r6)
> +	lxv	v12, 32(r6)
> +	lxv	v13, 48(r6)
> +
> +	addi	r6,r6,128
> +
> +	stxv	v10, 0(r7)
> +	stxv	v11, 16(r7)
> +	stxv	v12, 32(r7)
> +	stxv	v13, 48(r7)
> +
> +	addi	r7,r7,128
> +
> +	bdnz	L(copy_128)
> +
> +	clrldi.	r5,r5,64-7	/* Have we copied everything?  */
> +	beqlr
> +

ok.


> +	.p2align 5
> +L(copy_lt_256):
> +	cmpdi	r5,16
> +	ble	L(copy_le_16)
> +	srdi.	r9,r5,5		/* Divide by 32.  */
> +	beq	L(copy_lt_32)
> +	mtctr	r9
> +	/* Use r6=src+32, r7=dest+32, r8=src+64, r9=dest+64 in order to reduce
> +	   the dependency on r4 and r10.  */
> +	addi	r6,r4,32
> +	addi	r7,r10,32
> +	addi	r8,r4,64
> +	addi	r9,r10,64
> +

ok.

> +	.p2align 5
> +	/* Copy 32 bytes at a time, unaligned.
> +	   The loop is unrolled 3 times in order to reduce the dependency on
> +	   r4 and r10, copying up-to 96 bytes per iteration.  */
> +L(copy_32):
> +	lxv	v10, 0(r4)
> +	lxv	v11, 16(r4)
> +	stxv	v10, 0(r10)
> +	stxv	v11, 16(r10)
> +	bdz	L(end_copy_32a)
> +	addi	r4,r4,96
> +	addi	r10,r10,96
> +
> +	lxv	v10, 0(r6)
> +	lxv	v11, 16(r6)
> +	addi	r6,r6,96
> +	stxv	v10, 0(r7)
> +	stxv	v11, 16(r7)
> +	bdz	L(end_copy_32b)
> +	addi	r7,r7,96
> +
> +	lxv	v12, 0(r8)
> +	lxv	v13, 16(r8)
> +	addi	r8,r8,96
> +	stxv	v12, 0(r9)
> +	stxv	v13, 16(r9)
> +	addi	r9,r9,96
> +	bdnz	L(copy_32)
> +

ok.


> +	clrldi.	r5,r5,64-5	/* Have we copied everything?  */
> +	beqlr
> +	cmpdi	r5,16
> +	ble	L(copy_le_16)
> +	b	L(copy_lt_32)
> +

ok.


> +	.p2align 5
> +L(end_copy_32a):
> +	clrldi.	r5,r5,64-5	/* Have we copied everything?  */
> +	beqlr
> +	/* 32 bytes have been copied since the last update of r4 and r10.  */
> +	addi	r4,r4,32
> +	addi	r10,r10,32
> +	cmpdi	r5,16
> +	ble	L(copy_le_16)
> +	b	L(copy_lt_32)
> +

ok.


> +	.p2align 5
> +L(end_copy_32b):
> +	clrldi.	r5,r5,64-5	/* Have we copied everything?  */
> +	beqlr
> +	/* The last iteration of the loop copied 64 bytes.  Update r4 and r10
> +	   accordingly.  */
> +	addi	r4,r4,-32
> +	addi	r10,r10,-32
> +	cmpdi	r5,16
> +	ble	L(copy_le_16)
> +

ok.


> +	.p2align 5
> +L(copy_lt_32):
> +	lxv	v10, 0(r4)
> +	stxv	v10, 0(r10)
> +	addi	r4,r4,16
> +	addi	r10,r10,16
> +	subic	r5,r5,16
> +

ok.


> +	.p2align 5
> +L(copy_le_16):
> +	sldi	r6,r5,56
> +	lxvl	v10,r4,r6
> +	stxvl	v10,r10,r6
> +	blr
> +
> +
> +END_GEN_TB (MEMCPY,TB_TOCLESS)
> +libc_hidden_builtin_def (memcpy)


ok.


> diff --git a/sysdeps/powerpc/powerpc64/multiarch/Makefile b/sysdeps/powerpc/powerpc64/multiarch/Makefile
> index 8aa46a3702..fdaa5ddb24 100644
> --- a/sysdeps/powerpc/powerpc64/multiarch/Makefile
> +++ b/sysdeps/powerpc/powerpc64/multiarch/Makefile
> @@ -1,5 +1,6 @@
>  ifeq ($(subdir),string)
> -sysdep_routines += memcpy-power8-cached memcpy-power7 memcpy-a2 memcpy-power6 \
> +sysdep_routines += memcpy-power10 \
> +		   memcpy-power8-cached memcpy-power7 memcpy-a2 memcpy-power6 \
>  		   memcpy-cell memcpy-power4 memcpy-ppc64 \
>  		   memcmp-power8 memcmp-power7 memcmp-power4 memcmp-ppc64 \
>  		   memset-power7 memset-power6 memset-power4 \

If memcpy-power10 is only for "le", shouldn't it be in the next section,
inside this if:

    ifneq (,$(filter %le,$(config-machine)))


> diff --git a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
> index 1a6993616f..7bb3028676 100644
> --- a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
> +++ b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
> @@ -51,6 +51,12 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
>  #ifdef SHARED
>    /* Support sysdeps/powerpc/powerpc64/multiarch/memcpy.c.  */
>    IFUNC_IMPL (i, name, memcpy,
> +#ifdef __LITTLE_ENDIAN__
> +	      IFUNC_IMPL_ADD (array, i, memcpy,
> +			      hwcap2 & PPC_FEATURE2_ARCH_3_1
> +			      && hwcap & PPC_FEATURE_HAS_VSX,
> +			      __memcpy_power10)
> +#endif
>  	      IFUNC_IMPL_ADD (array, i, memcpy, hwcap2 & PPC_FEATURE2_ARCH_2_07,
>  			      __memcpy_power8_cached)
>  	      IFUNC_IMPL_ADD (array, i, memcpy, hwcap & PPC_FEATURE_HAS_VSX,

ok.


> diff --git a/sysdeps/powerpc/powerpc64/multiarch/memcpy-power10.S b/sysdeps/powerpc/powerpc64/multiarch/memcpy-power10.S
> new file mode 100644
> index 0000000000..70e0fc3ed6
> --- /dev/null
> +++ b/sysdeps/powerpc/powerpc64/multiarch/memcpy-power10.S
> @@ -0,0 +1,26 @@
> +/* Optimized memcpy implementation for POWER10.
> +   Copyright (C) 2021 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <https://www.gnu.org/licenses/>.  */
> +
> +#if defined __LITTLE_ENDIAN__ && IS_IN (libc)
> +#define MEMCPY __memcpy_power10
> +
> +#undef libc_hidden_builtin_def
> +#define libc_hidden_builtin_def(name)
> +
> +#include <sysdeps/powerpc/powerpc64/le/power10/memcpy.S>
> +#endif

ok.


> diff --git a/sysdeps/powerpc/powerpc64/multiarch/memcpy.c b/sysdeps/powerpc/powerpc64/multiarch/memcpy.c
> index 5733192932..53ab32ef26 100644
> --- a/sysdeps/powerpc/powerpc64/multiarch/memcpy.c
> +++ b/sysdeps/powerpc/powerpc64/multiarch/memcpy.c
> @@ -36,8 +36,15 @@ extern __typeof (__redirect_memcpy) __memcpy_power6 attribute_hidden;
>  extern __typeof (__redirect_memcpy) __memcpy_a2 attribute_hidden;
>  extern __typeof (__redirect_memcpy) __memcpy_power7 attribute_hidden;
>  extern __typeof (__redirect_memcpy) __memcpy_power8_cached attribute_hidden;
> +# if defined __LITTLE_ENDIAN__
> +extern __typeof (__redirect_memcpy) __memcpy_power10 attribute_hidden;
> +# endif
>  
>  libc_ifunc (__libc_memcpy,
> +# if defined __LITTLE_ENDIAN__
> +	    (hwcap2 & PPC_FEATURE2_ARCH_3_1 && hwcap & PPC_FEATURE_HAS_VSX)
> +	    ? __memcpy_power10 :
> +# endif
>  	    ((hwcap2 & PPC_FEATURE2_ARCH_2_07) && use_cached_memopt)
>  	    ? __memcpy_power8_cached :
>  	      (hwcap & PPC_FEATURE_HAS_VSX)
> -- 
> 2.30.2
> 

ok.


The only real question is the one about the Makefile diff; the rest is not
really important.

So, LGTM with the clarification on the Makefile.


o/
Raoni
  
Tulio Magno Quites Machado Filho April 30, 2021, 12:10 p.m. UTC | #4
Raoni Fassina Firmino via Libc-alpha <libc-alpha@sourceware.org> writes:

> On Tue, Apr 27, 2021 at 11:31:38PM -0300, Tulio Magno Quites Machado Filho via Libc-alpha wrote:

> Not sure if the subtract-carrying forms (sub*c) used here and in a few
> places below are needed, but they don't hurt either.

The usages of subic. and subfic are indeed required, but I replaced subic with
subi.
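
For context, a quick reference on these mnemonics (summary added here;
subfic is a real instruction, the others are extended mnemonics):

	subic. rD,rA,16   # = addic. rD,rA,-16 : sets CA and CR0; the
	                  #   recorded CR0 is what the following blelr
	                  #   tests, so the dot form is required
	subfic rD,rA,16   # rD = 16 - rA : the only subtract-from-immediate
	                  #   encoding, so it is required; always sets CA
	subic  rD,rA,16   # = addic rD,rA,-16 : sets CA, which this code
	                  #   never consumes
	subi   rD,rA,16   # = addi rD,rA,-16 : no CA or CR side effects,
	                  #   hence the replacement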

> Should this label also be aligned with ".p2align 5" the same as the
> others?

It isn't mandatory.

> If memcpy-power10 is only for "le", shouldn't it be in the next section,
> inside this if:
>
>     ifneq (,$(filter %le,$(config-machine)))

Fixed.
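
A sketch of what that placement might look like (hypothetical; the v2
patch itself is not shown on this page):

	ifneq (,$(filter %le,$(config-machine)))
	sysdep_routines += memcpy-power10
	endif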
  

Patch

diff --git a/sysdeps/powerpc/powerpc64/le/power10/memcpy.S b/sysdeps/powerpc/powerpc64/le/power10/memcpy.S
new file mode 100644
index 0000000000..f84acabec5
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/power10/memcpy.S
@@ -0,0 +1,198 @@ 
+/* Optimized memcpy implementation for POWER10.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+
+#ifndef MEMCPY
+# define MEMCPY memcpy
+#endif
+
+/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+	   Returns 'dst'.  */
+
+	.machine power9
+ENTRY_TOCLESS (MEMCPY, 5)
+	CALL_MCOUNT 3
+
+	/* Copy up to 16 bytes.  */
+	sldi	r6,r5,56	/* Prepare [l|st]xvl counter.  */
+	lxvl	v10,r4,r6
+	stxvl	v10,r3,r6
+	subic.	r6,r5,16	/* Return if len <= 16.  */
+	blelr
+
+	/* If len >= 256, assume nothing got copied before and copy
+	   again.  This might cause issues with overlapped memory, but memcpy
+	   is not expected to treat overlapped memory.  */
+	cmpdi	r5,256
+	bge	L(copy_ge_256)
+	/* 16 < len < 256 and the first 16 bytes have already been copied.  */
+	addi	r10,r3,16	/* Keep r3 intact as return value.  */
+	addi	r4,r4,16
+	subic	r5,r5,16
+	b	L(copy_lt_256)	/* Avoid the main loop if len < 256.  */
+
+	.p2align 5
+L(copy_ge_256):
+	mr	r10,r3		/* Keep r3 intact as return value.  */
+	/* Align dst to 16 bytes.  */
+	andi.	r9,r10,0xf
+	beq	L(dst_is_align_16)
+	lxv	v10,0(r4)
+	subfic	r12,r9,16
+	subf	r5,r12,r5
+	add	r4,r4,r12
+	stxv	v10,0(r10)
+	add	r10,r10,r12
+
+L(dst_is_align_16):
+	srdi	r9,r5,7		/* Divide by 128.  */
+	mtctr	r9
+	addi	r6,r4,64
+	addi	r7,r10,64
+
+
+	/* Main loop, copy 128 bytes per iteration.
+	   Use r6=src+64 and r7=dest+64 in order to reduce the dependency on
+	   r4 and r10.  */
+	.p2align 5
+L(copy_128):
+
+	lxv	v10, 0(r4)
+	lxv	v11, 16(r4)
+	lxv	v12, 32(r4)
+	lxv	v13, 48(r4)
+
+	addi	r4,r4,128
+
+	stxv	v10, 0(r10)
+	stxv	v11, 16(r10)
+	stxv	v12, 32(r10)
+	stxv	v13, 48(r10)
+
+	addi	r10,r10,128
+
+	lxv	v10, 0(r6)
+	lxv	v11, 16(r6)
+	lxv	v12, 32(r6)
+	lxv	v13, 48(r6)
+
+	addi	r6,r6,128
+
+	stxv	v10, 0(r7)
+	stxv	v11, 16(r7)
+	stxv	v12, 32(r7)
+	stxv	v13, 48(r7)
+
+	addi	r7,r7,128
+
+	bdnz	L(copy_128)
+
+	clrldi.	r5,r5,64-7	/* Have we copied everything?  */
+	beqlr
+
+	.p2align 5
+L(copy_lt_256):
+	cmpdi	r5,16
+	ble	L(copy_le_16)
+	srdi.	r9,r5,5		/* Divide by 32.  */
+	beq	L(copy_lt_32)
+	mtctr	r9
+	/* Use r6=src+32, r7=dest+32, r8=src+64, r9=dest+64 in order to reduce
+	   the dependency on r4 and r10.  */
+	addi	r6,r4,32
+	addi	r7,r10,32
+	addi	r8,r4,64
+	addi	r9,r10,64
+
+	.p2align 5
+	/* Copy 32 bytes at a time, unaligned.
+	   The loop is unrolled 3 times in order to reduce the dependency on
+	   r4 and r10, copying up-to 96 bytes per iteration.  */
+L(copy_32):
+	lxv	v10, 0(r4)
+	lxv	v11, 16(r4)
+	stxv	v10, 0(r10)
+	stxv	v11, 16(r10)
+	bdz	L(end_copy_32a)
+	addi	r4,r4,96
+	addi	r10,r10,96
+
+	lxv	v10, 0(r6)
+	lxv	v11, 16(r6)
+	addi	r6,r6,96
+	stxv	v10, 0(r7)
+	stxv	v11, 16(r7)
+	bdz	L(end_copy_32b)
+	addi	r7,r7,96
+
+	lxv	v12, 0(r8)
+	lxv	v13, 16(r8)
+	addi	r8,r8,96
+	stxv	v12, 0(r9)
+	stxv	v13, 16(r9)
+	addi	r9,r9,96
+	bdnz	L(copy_32)
+
+	clrldi.	r5,r5,64-5	/* Have we copied everything?  */
+	beqlr
+	cmpdi	r5,16
+	ble	L(copy_le_16)
+	b	L(copy_lt_32)
+
+	.p2align 5
+L(end_copy_32a):
+	clrldi.	r5,r5,64-5	/* Have we copied everything?  */
+	beqlr
+	/* 32 bytes have been copied since the last update of r4 and r10.  */
+	addi	r4,r4,32
+	addi	r10,r10,32
+	cmpdi	r5,16
+	ble	L(copy_le_16)
+	b	L(copy_lt_32)
+
+	.p2align 5
+L(end_copy_32b):
+	clrldi.	r5,r5,64-5	/* Have we copied everything?  */
+	beqlr
+	/* The last iteration of the loop copied 64 bytes.  Update r4 and r10
+	   accordingly.  */
+	addi	r4,r4,-32
+	addi	r10,r10,-32
+	cmpdi	r5,16
+	ble	L(copy_le_16)
+
+	.p2align 5
+L(copy_lt_32):
+	lxv	v10, 0(r4)
+	stxv	v10, 0(r10)
+	addi	r4,r4,16
+	addi	r10,r10,16
+	subic	r5,r5,16
+
+	.p2align 5
+L(copy_le_16):
+	sldi	r6,r5,56
+	lxvl	v10,r4,r6
+	stxvl	v10,r10,r6
+	blr
+
+
+END_GEN_TB (MEMCPY,TB_TOCLESS)
+libc_hidden_builtin_def (memcpy)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/Makefile b/sysdeps/powerpc/powerpc64/multiarch/Makefile
index 8aa46a3702..fdaa5ddb24 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/Makefile
+++ b/sysdeps/powerpc/powerpc64/multiarch/Makefile
@@ -1,5 +1,6 @@ 
 ifeq ($(subdir),string)
-sysdep_routines += memcpy-power8-cached memcpy-power7 memcpy-a2 memcpy-power6 \
+sysdep_routines += memcpy-power10 \
+		   memcpy-power8-cached memcpy-power7 memcpy-a2 memcpy-power6 \
 		   memcpy-cell memcpy-power4 memcpy-ppc64 \
 		   memcmp-power8 memcmp-power7 memcmp-power4 memcmp-ppc64 \
 		   memset-power7 memset-power6 memset-power4 \
diff --git a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
index 1a6993616f..7bb3028676 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
@@ -51,6 +51,12 @@  __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 #ifdef SHARED
   /* Support sysdeps/powerpc/powerpc64/multiarch/memcpy.c.  */
   IFUNC_IMPL (i, name, memcpy,
+#ifdef __LITTLE_ENDIAN__
+	      IFUNC_IMPL_ADD (array, i, memcpy,
+			      hwcap2 & PPC_FEATURE2_ARCH_3_1
+			      && hwcap & PPC_FEATURE_HAS_VSX,
+			      __memcpy_power10)
+#endif
 	      IFUNC_IMPL_ADD (array, i, memcpy, hwcap2 & PPC_FEATURE2_ARCH_2_07,
 			      __memcpy_power8_cached)
 	      IFUNC_IMPL_ADD (array, i, memcpy, hwcap & PPC_FEATURE_HAS_VSX,
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memcpy-power10.S b/sysdeps/powerpc/powerpc64/multiarch/memcpy-power10.S
new file mode 100644
index 0000000000..70e0fc3ed6
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/memcpy-power10.S
@@ -0,0 +1,26 @@ 
+/* Optimized memcpy implementation for POWER10.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#if defined __LITTLE_ENDIAN__ && IS_IN (libc)
+#define MEMCPY __memcpy_power10
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc64/le/power10/memcpy.S>
+#endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memcpy.c b/sysdeps/powerpc/powerpc64/multiarch/memcpy.c
index 5733192932..53ab32ef26 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memcpy.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/memcpy.c
@@ -36,8 +36,15 @@  extern __typeof (__redirect_memcpy) __memcpy_power6 attribute_hidden;
 extern __typeof (__redirect_memcpy) __memcpy_a2 attribute_hidden;
 extern __typeof (__redirect_memcpy) __memcpy_power7 attribute_hidden;
 extern __typeof (__redirect_memcpy) __memcpy_power8_cached attribute_hidden;
+# if defined __LITTLE_ENDIAN__
+extern __typeof (__redirect_memcpy) __memcpy_power10 attribute_hidden;
+# endif
 
 libc_ifunc (__libc_memcpy,
+# if defined __LITTLE_ENDIAN__
+	    (hwcap2 & PPC_FEATURE2_ARCH_3_1 && hwcap & PPC_FEATURE_HAS_VSX)
+	    ? __memcpy_power10 :
+# endif
 	    ((hwcap2 & PPC_FEATURE2_ARCH_2_07) && use_cached_memopt)
 	    ? __memcpy_power8_cached :
 	      (hwcap & PPC_FEATURE_HAS_VSX)
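
With this hunk applied, the runtime selection order becomes, in a
simplified sketch (the branch after the visible context follows the
pre-existing code and the ifunc-impl-list order above):

	/* Little-endian POWER10 with VSX.  */
	if (LE && (hwcap2 & PPC_FEATURE2_ARCH_3_1) && (hwcap & PPC_FEATURE_HAS_VSX))
	  use __memcpy_power10;
	/* POWER8 with the cached-memopt tunable enabled.  */
	else if ((hwcap2 & PPC_FEATURE2_ARCH_2_07) && use_cached_memopt)
	  use __memcpy_power8_cached;
	/* Any VSX machine falls back to the POWER7 variant; older
	   fallbacks follow after that.  */
	else if (hwcap & PPC_FEATURE_HAS_VSX)
	  use __memcpy_power7;
	...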