Check ERMS in memmove/memcpy/mempcpy/memset

Message ID 20160629184253.GA3534@intel.com
State New, archived

Commit Message

Lu, Hongjiu June 29, 2016, 6:42 p.m. UTC
Although the Enhanced REP MOVSB/STOSB (ERMS) implementations of memmove,
memcpy, mempcpy and memset aren't selected on current processors, this
patch adds a Prefer_ERMS check to the memmove, memcpy, mempcpy and memset
IFUNC resolvers so that glibc developers can experiment with them using
GLIBC_IFUNC.
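
For context, the ERMS variants referenced here (__memcpy_erms,
__memset_erms, etc.) boil down to a single REP MOVSB/STOSB instruction,
which ERMS-capable CPUs execute with an optimized microcode fast path.
A minimal C sketch of what __memcpy_erms amounts to -- illustrative
only, not the glibc assembly -- assuming GCC extended asm on x86-64:

#include <stddef.h>

/* Sketch of an ERMS-style memcpy: one REP MOVSB moves RCX bytes
   from RSI to RDI.  Hypothetical stand-in for __memcpy_erms.  */
static void *
memcpy_erms_sketch (void *dst, const void *src, size_t n)
{
  void *ret = dst;
  asm volatile ("rep movsb"
		: "+D" (dst), "+S" (src), "+c" (n)
		:: "memory");
  return ret;
}

With the Prefer_ERMS bit set -- presumably via something like
GLIBC_IFUNC=Prefer_ERMS, going by the CHECK_GLIBC_IFUNC_ARCH_BOTH hook
in the diff below, which suggests the bit can be both set and masked --
the resolvers return these variants unconditionally.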

OK for master?

H.J.
---
	* sysdeps/x86/cpu-features.c (init_cpu_features): Also
	check Prefer_ERMS.
	* sysdeps/x86/cpu-features.h (bit_arch_Prefer_ERMS): New.
	(index_arch_Prefer_ERMS): Likewise.
	* sysdeps/x86_64/multiarch/memcpy.S (__new_memcpy): Return
	__memcpy_erms for Prefer_ERMS.
	* sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
	(__memmove_erms): Enable for libc.a.
	* sysdeps/x86_64/multiarch/memmove.S (__libc_memmove): Return
	__memmove_erms for Prefer_ERMS.
	* sysdeps/x86_64/multiarch/mempcpy.S (__mempcpy): Return
	__mempcpy_erms for Prefer_ERMS.
	* sysdeps/x86_64/multiarch/memset.S (memset): Return
	__memset_erms for Prefer_ERMS.
---
 sysdeps/x86/cpu-features.c                            | 1 +
 sysdeps/x86/cpu-features.h                            | 3 +++
 sysdeps/x86_64/multiarch/memcpy.S                     | 3 +++
 sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S | 6 +++++-
 sysdeps/x86_64/multiarch/memmove.S                    | 3 +++
 sysdeps/x86_64/multiarch/mempcpy.S                    | 3 +++
 sysdeps/x86_64/multiarch/memset.S                     | 3 +++
 7 files changed, 21 insertions(+), 1 deletion(-)
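
The resolver change is the same in each file below: load the address of
the *_erms variant into RAX first, then keep it only when Prefer_ERMS
is set, otherwise fall through to the existing AVX-512/AVX/SSE2
selection.  A hypothetical C rendering of that dispatch (the real
resolvers are assembly; all names here are illustrative stand-ins):

#include <stddef.h>
#include <string.h>

typedef void *(*memcpy_fn) (void *, const void *, size_t);

static void *
erms_variant (void *d, const void *s, size_t n)   /* ~ __memcpy_erms */
{ return memcpy (d, s, n); }

static void *
vector_variant (void *d, const void *s, size_t n) /* ~ SSE2/AVX paths */
{ return memcpy (d, s, n); }

static memcpy_fn
resolver_sketch (int prefer_erms)
{
  if (prefer_erms)          /* HAS_ARCH_FEATURE (Prefer_ERMS); jnz 2f */
    return erms_variant;    /* lea __memcpy_erms(%rip), %RAX_LP */
  return vector_variant;    /* existing feature checks run instead */
}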
  

Comments

Ondrej Bilka July 14, 2016, 1:22 p.m. UTC | #1
On Wed, Jun 29, 2016 at 11:42:53AM -0700, H.J. Lu wrote:
> Although the Enhanced REP MOVSB/STOSB (ERMS) implementations of memmove,
> memcpy, mempcpy and memset aren't used by the current processors, this
> patch adds Prefer_ERMS check in memmove, memcpy, mempcpy and memset so
> that glibc developers can experiment with it using GLIBC_IFUNC.
> 
> OK for master?
> 
Looks OK to me.
  

Patch

diff --git a/sysdeps/x86/cpu-features.c b/sysdeps/x86/cpu-features.c
index 0e5765e..d506aa5 100644
--- a/sysdeps/x86/cpu-features.c
+++ b/sysdeps/x86/cpu-features.c
@@ -473,6 +473,7 @@  no_cpuid:
 		  case 13:
 		    CHECK_GLIBC_IFUNC_ARCH_OFF (AVX2_Usable);
 		    CHECK_GLIBC_IFUNC_ARCH_OFF (FMA4_Usable);
+		    CHECK_GLIBC_IFUNC_ARCH_BOTH (Prefer_ERMS);
 		    CHECK_GLIBC_IFUNC_ARCH_NEED_CPU_BOTH (Slow_SSE4_2,
 							  SSE4_2);
 		    break;
diff --git a/sysdeps/x86/cpu-features.h b/sysdeps/x86/cpu-features.h
index 2bd9371..97ffe76 100644
--- a/sysdeps/x86/cpu-features.h
+++ b/sysdeps/x86/cpu-features.h
@@ -36,6 +36,7 @@ 
 #define bit_arch_Prefer_MAP_32BIT_EXEC		(1 << 16)
 #define bit_arch_Prefer_No_VZEROUPPER		(1 << 17)
 #define bit_arch_Fast_Unaligned_Copy		(1 << 18)
+#define bit_arch_Prefer_ERMS			(1 << 19)
 
 /* CPUID Feature flags.  */
 
@@ -105,6 +106,7 @@ 
 # define index_arch_Prefer_MAP_32BIT_EXEC FEATURE_INDEX_1*FEATURE_SIZE
 # define index_arch_Prefer_No_VZEROUPPER FEATURE_INDEX_1*FEATURE_SIZE
 # define index_arch_Fast_Unaligned_Copy	FEATURE_INDEX_1*FEATURE_SIZE
+# define index_arch_Prefer_ERMS		FEATURE_INDEX_1*FEATURE_SIZE
 
 
 # if defined (_LIBC) && !IS_IN (nonlib)
@@ -274,6 +276,7 @@  extern const struct cpu_features *__get_cpu_features (void)
 # define index_arch_Prefer_MAP_32BIT_EXEC FEATURE_INDEX_1
 # define index_arch_Prefer_No_VZEROUPPER FEATURE_INDEX_1
 # define index_arch_Fast_Unaligned_Copy	FEATURE_INDEX_1
+# define index_arch_Prefer_ERMS		FEATURE_INDEX_1
 
 #endif	/* !__ASSEMBLER__ */
 
diff --git a/sysdeps/x86_64/multiarch/memcpy.S b/sysdeps/x86_64/multiarch/memcpy.S
index f6771a4..df7fbac 100644
--- a/sysdeps/x86_64/multiarch/memcpy.S
+++ b/sysdeps/x86_64/multiarch/memcpy.S
@@ -29,6 +29,9 @@ 
 ENTRY(__new_memcpy)
 	.type	__new_memcpy, @gnu_indirect_function
 	LOAD_RTLD_GLOBAL_RO_RDX
+	lea	__memcpy_erms(%rip), %RAX_LP
+	HAS_ARCH_FEATURE (Prefer_ERMS)
+	jnz	2f
 # ifdef HAVE_AVX512_ASM_SUPPORT
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
index a2cce39..4893ea4 100644
--- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
@@ -150,13 +150,15 @@  L(nop):
 #if defined USE_MULTIARCH && IS_IN (libc)
 END (MEMMOVE_SYMBOL (__memmove, unaligned))
 
-# if VEC_SIZE == 16 && defined SHARED
+# if VEC_SIZE == 16
+#  if defined SHARED
 /* Only used to measure performance of REP MOVSB.  */
 ENTRY (__mempcpy_erms)
 	movq	%rdi, %rax
 	addq	%rdx, %rax
 	jmp	L(start_movsb)
 END (__mempcpy_erms)
+#  endif
 
 ENTRY (__memmove_erms)
 	movq	%rdi, %rax
@@ -181,7 +183,9 @@  L(movsb_backward):
 	cld
 	ret
 END (__memmove_erms)
+#  if defined SHARED
 strong_alias (__memmove_erms, __memcpy_erms)
+#  endif
 # endif
 
 # ifdef SHARED
diff --git a/sysdeps/x86_64/multiarch/memmove.S b/sysdeps/x86_64/multiarch/memmove.S
index 25c3586..8e1c6ac 100644
--- a/sysdeps/x86_64/multiarch/memmove.S
+++ b/sysdeps/x86_64/multiarch/memmove.S
@@ -27,6 +27,9 @@ 
 ENTRY(__libc_memmove)
 	.type	__libc_memmove, @gnu_indirect_function
 	LOAD_RTLD_GLOBAL_RO_RDX
+	lea	__memmove_erms(%rip), %RAX_LP
+	HAS_ARCH_FEATURE (Prefer_ERMS)
+	jnz	2f
 # ifdef HAVE_AVX512_ASM_SUPPORT
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
diff --git a/sysdeps/x86_64/multiarch/mempcpy.S b/sysdeps/x86_64/multiarch/mempcpy.S
index f9c6df3..4011a1a 100644
--- a/sysdeps/x86_64/multiarch/mempcpy.S
+++ b/sysdeps/x86_64/multiarch/mempcpy.S
@@ -29,6 +29,9 @@ 
 ENTRY(__mempcpy)
 	.type	__mempcpy, @gnu_indirect_function
 	LOAD_RTLD_GLOBAL_RO_RDX
+	lea	__mempcpy_erms(%rip), %RAX_LP
+	HAS_ARCH_FEATURE (Prefer_ERMS)
+	jnz	2f
 # ifdef HAVE_AVX512_ASM_SUPPORT
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
diff --git a/sysdeps/x86_64/multiarch/memset.S b/sysdeps/x86_64/multiarch/memset.S
index 4e52d8f..2b964a0 100644
--- a/sysdeps/x86_64/multiarch/memset.S
+++ b/sysdeps/x86_64/multiarch/memset.S
@@ -26,6 +26,9 @@ 
 ENTRY(memset)
 	.type	memset, @gnu_indirect_function
 	LOAD_RTLD_GLOBAL_RO_RDX
+	lea	__memset_erms(%rip), %RAX_LP
+	HAS_ARCH_FEATURE (Prefer_ERMS)
+	jnz	2f
 	lea	__memset_sse2_unaligned_erms(%rip), %RAX_LP
 	HAS_CPU_FEATURE (ERMS)
 	jnz	1f
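
A note on the __memmove_erms hunk above: its backward path uses
STD/REP MOVSB/CLD so that overlapping moves stay correct.  A
hypothetical C sketch of that overlap handling, again illustrative
rather than the glibc assembly:

#include <stddef.h>

static void *
memmove_erms_sketch (void *dst, const void *src, size_t n)
{
  unsigned char *d = dst;
  const unsigned char *s = src;
  if (d <= s || d >= s + n)
    /* No harmful overlap: copy forward.  */
    asm volatile ("rep movsb"
		  : "+D" (d), "+S" (s), "+c" (n) :: "memory");
  else
    {
      /* Destination overlaps the source tail: copy backward under
	 the direction flag, then restore it.  */
      d += n - 1;
      s += n - 1;
      asm volatile ("std; rep movsb; cld"
		    : "+D" (d), "+S" (s), "+c" (n) :: "memory");
    }
  return dst;
}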