Check Prefer_ERMS in memmove/memcpy/mempcpy/memset

Submitter H.J. Lu
Date June 30, 2016, 2:55 p.m.
Message ID <CAMe9rOoVhfAkbVav0z9wtpwqu_j1Sc--DTXqkiAqEgVcc-D=JA@mail.gmail.com>
Permalink /patch/13521/
State New

Comments

H.J. Lu - June 30, 2016, 2:55 p.m.
On Wed, Jun 29, 2016 at 11:42 AM, H.J. Lu <hongjiu.lu@intel.com> wrote:
> Although the Enhanced REP MOVSB/STOSB (ERMS) implementations of memmove,
> memcpy, mempcpy and memset aren't used on current processors, this
> patch adds a Prefer_ERMS check in memmove, memcpy, mempcpy and memset so
> that glibc developers can experiment with them using GLIBC_IFUNC.
>
> OK for master?
>
> H.J.
> ---
>         * sysdeps/x86/cpu-features.c (init_cpu_features): Also
>         check Prefer_ERMS.
>         * sysdeps/x86/cpu-features.h (bit_arch_Prefer_ERMS): New.
>         (index_arch_Prefer_ERMS): Likewise.
>         * sysdeps/x86_64/multiarch/memcpy.S (__new_memcpy): Return
>         __memcpy_erms for Prefer_ERMS.
>         * sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
>         (__memmove_erms): Enabled for libc.a.
>         * sysdeps/x86_64/multiarch/memmove.S (__libc_memmove): Return
>         __memmove_erms for Prefer_ERMS.
>         * sysdeps/x86_64/multiarch/mempcpy.S (__mempcpy): Return
>         __mempcpy_erms for Prefer_ERMS.
>         * sysdeps/x86_64/multiarch/memset.S (memset): Return
>         __memset_erms for Prefer_ERMS.

I am checking in this patch without the cpu-features.c change.
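
For background, the ERMS variants this patch exposes are built around a bare
REP MOVSB/STOSB, which microcoded fast paths on ERMS-capable CPUs make
competitive with the vectorized loops. A minimal C sketch of that copy
primitive, assuming GCC-style inline assembly (the function name is
illustrative, not part of the patch):

#include <stddef.h>

/* Copy n bytes with REP MOVSB, as __memcpy_erms does in assembly.
   "+D", "+S" and "+c" pin dst, src and n to RDI, RSI and RCX, the
   registers REP MOVSB reads and updates.  */
static void *
memcpy_rep_movsb (void *dst, const void *src, size_t n)
{
  void *ret = dst;
  asm volatile ("rep movsb"
                : "+D" (dst), "+S" (src), "+c" (n)
                :
                : "memory");
  return ret;
}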

Patch

From bdf594e6fa52ab2bf4d632a815181512a769fb7b Mon Sep 17 00:00:00 2001
From: "H.J. Lu" <hjl.tools@gmail.com>
Date: Mon, 27 Jun 2016 12:27:49 -0700
Subject: [PATCH] Check Prefer_ERMS in memmove/memcpy/mempcpy/memset

Although the Enhanced REP MOVSB/STOSB (ERMS) implementations of memmove,
memcpy, mempcpy and memset aren't used on current processors, this
patch adds a Prefer_ERMS check in memmove, memcpy, mempcpy and memset so
that they can be used in the future.

	* sysdeps/x86/cpu-features.h (bit_arch_Prefer_ERMS): New.
	(index_arch_Prefer_ERMS): Likewise.
	* sysdeps/x86_64/multiarch/memcpy.S (__new_memcpy): Return
	__memcpy_erms for Prefer_ERMS.
	* sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
	(__memmove_erms): Enabled for libc.a.
	* sysdeps/x86_64/multiarch/memmove.S (__libc_memmove): Return
	__memmove_erms for Prefer_ERMS.
	* sysdeps/x86_64/multiarch/mempcpy.S (__mempcpy): Return
	__mempcpy_erms for Prefer_ERMS.
	* sysdeps/x86_64/multiarch/memset.S (memset): Return
	__memset_erms for Prefer_ERMS.
---
 sysdeps/x86/cpu-features.h                            | 3 +++
 sysdeps/x86_64/multiarch/memcpy.S                     | 3 +++
 sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S | 6 +++++-
 sysdeps/x86_64/multiarch/memmove.S                    | 3 +++
 sysdeps/x86_64/multiarch/mempcpy.S                    | 3 +++
 sysdeps/x86_64/multiarch/memset.S                     | 3 +++
 6 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/sysdeps/x86/cpu-features.h b/sysdeps/x86/cpu-features.h
index 2bd9371..97ffe76 100644
--- a/sysdeps/x86/cpu-features.h
+++ b/sysdeps/x86/cpu-features.h
@@ -36,6 +36,7 @@ 
 #define bit_arch_Prefer_MAP_32BIT_EXEC		(1 << 16)
 #define bit_arch_Prefer_No_VZEROUPPER		(1 << 17)
 #define bit_arch_Fast_Unaligned_Copy		(1 << 18)
+#define bit_arch_Prefer_ERMS			(1 << 19)
 
 /* CPUID Feature flags.  */
 
@@ -105,6 +106,7 @@ 
 # define index_arch_Prefer_MAP_32BIT_EXEC FEATURE_INDEX_1*FEATURE_SIZE
 # define index_arch_Prefer_No_VZEROUPPER FEATURE_INDEX_1*FEATURE_SIZE
 # define index_arch_Fast_Unaligned_Copy	FEATURE_INDEX_1*FEATURE_SIZE
+# define index_arch_Prefer_ERMS		FEATURE_INDEX_1*FEATURE_SIZE
 
 
 # if defined (_LIBC) && !IS_IN (nonlib)
@@ -274,6 +276,7 @@  extern const struct cpu_features *__get_cpu_features (void)
 # define index_arch_Prefer_MAP_32BIT_EXEC FEATURE_INDEX_1
 # define index_arch_Prefer_No_VZEROUPPER FEATURE_INDEX_1
 # define index_arch_Fast_Unaligned_Copy	FEATURE_INDEX_1
+# define index_arch_Prefer_ERMS		FEATURE_INDEX_1
 
 #endif	/* !__ASSEMBLER__ */
 
diff --git a/sysdeps/x86_64/multiarch/memcpy.S b/sysdeps/x86_64/multiarch/memcpy.S
index f6771a4..df7fbac 100644
--- a/sysdeps/x86_64/multiarch/memcpy.S
+++ b/sysdeps/x86_64/multiarch/memcpy.S
@@ -29,6 +29,9 @@ 
 ENTRY(__new_memcpy)
 	.type	__new_memcpy, @gnu_indirect_function
 	LOAD_RTLD_GLOBAL_RO_RDX
+	lea	__memcpy_erms(%rip), %RAX_LP
+	HAS_ARCH_FEATURE (Prefer_ERMS)
+	jnz	2f
 # ifdef HAVE_AVX512_ASM_SUPPORT
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
index a2cce39..4893ea4 100644
--- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
@@ -150,13 +150,15 @@  L(nop):
 #if defined USE_MULTIARCH && IS_IN (libc)
 END (MEMMOVE_SYMBOL (__memmove, unaligned))
 
-# if VEC_SIZE == 16 && defined SHARED
+# if VEC_SIZE == 16
+#  if defined SHARED
 /* Only used to measure performance of REP MOVSB.  */
 ENTRY (__mempcpy_erms)
 	movq	%rdi, %rax
 	addq	%rdx, %rax
 	jmp	L(start_movsb)
 END (__mempcpy_erms)
+#  endif
 
 ENTRY (__memmove_erms)
 	movq	%rdi, %rax
@@ -181,7 +183,9 @@  L(movsb_backward):
 	cld
 	ret
 END (__memmove_erms)
+#  if defined SHARED
 strong_alias (__memmove_erms, __memcpy_erms)
+#  endif
 # endif
 
 # ifdef SHARED
diff --git a/sysdeps/x86_64/multiarch/memmove.S b/sysdeps/x86_64/multiarch/memmove.S
index 25c3586..8e1c6ac 100644
--- a/sysdeps/x86_64/multiarch/memmove.S
+++ b/sysdeps/x86_64/multiarch/memmove.S
@@ -27,6 +27,9 @@ 
 ENTRY(__libc_memmove)
 	.type	__libc_memmove, @gnu_indirect_function
 	LOAD_RTLD_GLOBAL_RO_RDX
+	lea	__memmove_erms(%rip), %RAX_LP
+	HAS_ARCH_FEATURE (Prefer_ERMS)
+	jnz	2f
 # ifdef HAVE_AVX512_ASM_SUPPORT
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
diff --git a/sysdeps/x86_64/multiarch/mempcpy.S b/sysdeps/x86_64/multiarch/mempcpy.S
index f9c6df3..4011a1a 100644
--- a/sysdeps/x86_64/multiarch/mempcpy.S
+++ b/sysdeps/x86_64/multiarch/mempcpy.S
@@ -29,6 +29,9 @@ 
 ENTRY(__mempcpy)
 	.type	__mempcpy, @gnu_indirect_function
 	LOAD_RTLD_GLOBAL_RO_RDX
+	lea	__mempcpy_erms(%rip), %RAX_LP
+	HAS_ARCH_FEATURE (Prefer_ERMS)
+	jnz	2f
 # ifdef HAVE_AVX512_ASM_SUPPORT
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
diff --git a/sysdeps/x86_64/multiarch/memset.S b/sysdeps/x86_64/multiarch/memset.S
index 4e52d8f..2b964a0 100644
--- a/sysdeps/x86_64/multiarch/memset.S
+++ b/sysdeps/x86_64/multiarch/memset.S
@@ -26,6 +26,9 @@ 
 ENTRY(memset)
 	.type	memset, @gnu_indirect_function
 	LOAD_RTLD_GLOBAL_RO_RDX
+	lea	__memset_erms(%rip), %RAX_LP
+	HAS_ARCH_FEATURE (Prefer_ERMS)
+	jnz	2f
 	lea	__memset_sse2_unaligned_erms(%rip), %RAX_LP
 	HAS_CPU_FEATURE (ERMS)
 	jnz	1f
-- 
2.7.4
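
The change is the same in all four resolvers: the ERMS entry point is loaded
into RAX up front and returned as soon as Prefer_ERMS tests nonzero, before
any of the existing feature checks run. A rough C rendering of that dispatch
order, with hypothetical stubs standing in for the real variants and for the
HAS_ARCH_FEATURE bit test:

#include <stddef.h>
#include <string.h>

typedef void *(*memcpy_fn) (void *, const void *, size_t);

/* Stand-ins for the real IFUNC variants; they just forward to memcpy
   so the sketch compiles and runs.  */
static void *
memcpy_erms_stub (void *d, const void *s, size_t n)
{
  return memcpy (d, s, n);
}

static void *
memcpy_sse2_stub (void *d, const void *s, size_t n)
{
  return memcpy (d, s, n);
}

/* Hypothetical flag; the real resolvers read the Prefer_ERMS bit out
   of _rtld_global_ro via HAS_ARCH_FEATURE.  */
static int prefer_erms;

static memcpy_fn
memcpy_resolver (void)
{
  /* Prefer_ERMS is tested before everything else, so setting it
     forces the ERMS variant regardless of the other feature bits.  */
  if (prefer_erms)
    return memcpy_erms_stub;
  return memcpy_sse2_stub;
}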