[x86_64] Update memcpy, mempcpy and memmove selection order for Excavator CPU BZ #19583

Submitter H.J. Lu
Date March 23, 2016, 5:59 p.m.
Message ID <CAMe9rOpencHVdypLMYSBGy0fA7iSVTb1+NwokoudUKC7v7_zeA@mail.gmail.com>
Permalink /patch/11494/
State New

Comments

H.J. Lu - March 23, 2016, 5:59 p.m.
On Wed, Mar 23, 2016 at 3:12 AM, Pawar, Amit <Amit.Pawar@amd.com> wrote:
>> Then we should add Fast_Unaligned_Copy and only use it in memcpy.
> Please find attached the patch and ChangeLog files containing the fix for the memcpy IFUNC function. Is it OK, or please suggest any required changes.
>

It isn't OK.  Try this.
Pawar, Amit - March 28, 2016, 7:43 a.m.
>It isn't OK.  Try this.

This is OK. Can you please commit this change?

Thanks,
Amit Pawar
H.J. Lu - March 28, 2016, 12:12 p.m.
On Mon, Mar 28, 2016 at 12:43 AM, Pawar, Amit <Amit.Pawar@amd.com> wrote:
>>It isn't OK.  Try this.
> This is OK. Can you please commit this change?
>
> Thanks,
> Amit Pawar

Tested on ia32 and x86-64.  I am checking it in.

Patch

From 327aadf6348bd41d1fae46ee7780e214c0a493c1 Mon Sep 17 00:00:00 2001
From: "H.J. Lu" <hjl.tools@gmail.com>
Date: Wed, 23 Mar 2016 10:33:19 -0700
Subject: [PATCH] [x86] Add a feature bit: Fast_Unaligned_Copy

On AMD processors, memcpy optimized with unaligned SSE load is
slower than memcpy optimized with aligned SSSE3, while other string
functions are faster with unaligned SSE load.  A feature bit,
Fast_Unaligned_Copy, is added to select memcpy optimized with
unaligned SSE load.
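
The asymmetry is easy to see in a standalone C sketch (illustration
only, not glibc code; the bit values other than Fast_Unaligned_Copy's
1 << 18 are stand-ins):

#include <stdio.h>

/* Stand-in bit values; only bit 18 matches the patch.  */
#define bit_arch_Fast_Copy_Backward   (1u << 1)
#define bit_arch_Fast_Unaligned_Load  (1u << 4)
#define bit_arch_Fast_Unaligned_Copy  (1u << 18)

static unsigned int
feature_word (int intel_fast_unaligned, int amd_excavator)
{
  unsigned int feature = 0;
  if (intel_fast_unaligned)
    /* Intel: unaligned SSE load is fast for memcpy too, so both
       bits are set together.  */
    feature |= (bit_arch_Fast_Unaligned_Load
                | bit_arch_Fast_Unaligned_Copy);
  else if (amd_excavator)
    /* Excavator (family 0x15, models 0x60-0x7f): unaligned SSE load
       is fast for most string functions, but memcpy prefers aligned
       SSSE3, so Fast_Unaligned_Copy stays clear.  */
    feature |= (bit_arch_Fast_Unaligned_Load
                | bit_arch_Fast_Copy_Backward);
  return feature;
}

int main (void)
{
  unsigned int excavator = feature_word (0, 1);
  printf ("Excavator memcpy uses unaligned SSE load: %s\n",
          (excavator & bit_arch_Fast_Unaligned_Copy) ? "yes" : "no");
  return 0;
}

The #if/#error additions in the diff below merely assert at compile
time that all the bits OR'ed into one feature[] element really live in
the same index_arch_* word.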

	[BZ #19583]
	* sysdeps/x86/cpu-features.c (init_cpu_features): Set
	Fast_Unaligned_Copy with Fast_Unaligned_Load for Intel
	processors.  Set Fast_Copy_Backward for AMD Excavator
	processors.
	* sysdeps/x86/cpu-features.h (bit_arch_Fast_Unaligned_Copy):
	New.
	(index_arch_Fast_Unaligned_Copy): Likewise.
	* sysdeps/x86_64/multiarch/memcpy.S (__new_memcpy): Check
	Fast_Unaligned_Copy instead of Fast_Unaligned_Load.
---
 sysdeps/x86/cpu-features.c        | 14 +++++++++++++-
 sysdeps/x86/cpu-features.h        |  3 +++
 sysdeps/x86_64/multiarch/memcpy.S |  2 +-
 3 files changed, 17 insertions(+), 2 deletions(-)

diff --git a/sysdeps/x86/cpu-features.c b/sysdeps/x86/cpu-features.c
index c8f81ef..de75c79 100644
--- a/sysdeps/x86/cpu-features.c
+++ b/sysdeps/x86/cpu-features.c
@@ -153,8 +153,12 @@  init_cpu_features (struct cpu_features *cpu_features)
 #if index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
 # error index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
 #endif
+#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
+# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
+#endif
 	      cpu_features->feature[index_arch_Fast_Unaligned_Load]
 		|= (bit_arch_Fast_Unaligned_Load
+		    | bit_arch_Fast_Unaligned_Copy
 		    | bit_arch_Prefer_PMINUB_for_stringop
 		    | bit_arch_Slow_SSE4_2);
 	      break;
@@ -183,10 +187,14 @@  init_cpu_features (struct cpu_features *cpu_features)
 #if index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
 # error index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
 #endif
+#if index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
+# error index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
+#endif
 	      cpu_features->feature[index_arch_Fast_Rep_String]
 		|= (bit_arch_Fast_Rep_String
 		    | bit_arch_Fast_Copy_Backward
 		    | bit_arch_Fast_Unaligned_Load
+		    | bit_arch_Fast_Unaligned_Copy
 		    | bit_arch_Prefer_PMINUB_for_stringop);
 	      break;
 	    }
@@ -220,10 +228,14 @@  init_cpu_features (struct cpu_features *cpu_features)
 
       if (family == 0x15)
 	{
+#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Copy_Backward
+# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Copy_Backward
+#endif
 	  /* "Excavator"   */
 	  if (model >= 0x60 && model <= 0x7f)
 	    cpu_features->feature[index_arch_Fast_Unaligned_Load]
-	      |= bit_arch_Fast_Unaligned_Load;
+	      |= (bit_arch_Fast_Unaligned_Load
+		  | bit_arch_Fast_Copy_Backward);
 	}
     }
   else
diff --git a/sysdeps/x86/cpu-features.h b/sysdeps/x86/cpu-features.h
index e06eb7e..bfe1f4c 100644
--- a/sysdeps/x86/cpu-features.h
+++ b/sysdeps/x86/cpu-features.h
@@ -35,6 +35,7 @@ 
 #define bit_arch_I686				(1 << 15)
 #define bit_arch_Prefer_MAP_32BIT_EXEC		(1 << 16)
 #define bit_arch_Prefer_No_VZEROUPPER		(1 << 17)
+#define bit_arch_Fast_Unaligned_Copy		(1 << 18)
 
 /* CPUID Feature flags.  */
 
@@ -101,6 +102,7 @@ 
 # define index_arch_I686		FEATURE_INDEX_1*FEATURE_SIZE
 # define index_arch_Prefer_MAP_32BIT_EXEC FEATURE_INDEX_1*FEATURE_SIZE
 # define index_arch_Prefer_No_VZEROUPPER FEATURE_INDEX_1*FEATURE_SIZE
+# define index_arch_Fast_Unaligned_Copy	FEATURE_INDEX_1*FEATURE_SIZE
 
 
 # if defined (_LIBC) && !IS_IN (nonlib)
@@ -265,6 +267,7 @@  extern const struct cpu_features *__get_cpu_features (void)
 # define index_arch_I686		FEATURE_INDEX_1
 # define index_arch_Prefer_MAP_32BIT_EXEC FEATURE_INDEX_1
 # define index_arch_Prefer_No_VZEROUPPER FEATURE_INDEX_1
+# define index_arch_Fast_Unaligned_Copy	FEATURE_INDEX_1
 
 #endif	/* !__ASSEMBLER__ */
 
diff --git a/sysdeps/x86_64/multiarch/memcpy.S b/sysdeps/x86_64/multiarch/memcpy.S
index 8882590..5b045d7 100644
--- a/sysdeps/x86_64/multiarch/memcpy.S
+++ b/sysdeps/x86_64/multiarch/memcpy.S
@@ -42,7 +42,7 @@  ENTRY(__new_memcpy)
 	HAS_ARCH_FEATURE (AVX_Fast_Unaligned_Load)
 	jnz	2f
 	lea	__memcpy_sse2_unaligned(%rip), %RAX_LP
-	HAS_ARCH_FEATURE (Fast_Unaligned_Load)
+	HAS_ARCH_FEATURE (Fast_Unaligned_Copy)
 	jnz	2f
 	lea	__memcpy_sse2(%rip), %RAX_LP
 	HAS_CPU_FEATURE (SSSE3)
-- 
2.5.5
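
For readers following the memcpy.S hunk, the resulting selection order
can be modeled in C roughly as below.  The first three branches mirror
the hunk; the SSSE3 tail falls outside the diff context and is
inferred, so treat this as a sketch of the assembly resolver, not a
literal translation:

#include <stdio.h>

/* Approximate post-patch __new_memcpy selection order (sketch only).  */
static const char *
select_memcpy (int avx_fast_unaligned_load, int fast_unaligned_copy,
               int ssse3, int fast_copy_backward)
{
  if (avx_fast_unaligned_load)
    return "__memcpy_avx_unaligned";
  if (fast_unaligned_copy)      /* was Fast_Unaligned_Load before the patch */
    return "__memcpy_sse2_unaligned";
  if (!ssse3)
    return "__memcpy_sse2";
  if (fast_copy_backward)       /* now set on Excavator by this patch */
    return "__memcpy_ssse3_back";
  return "__memcpy_ssse3";
}

int main (void)
{
  /* Excavator after the patch: Fast_Unaligned_Copy clear,
     Fast_Copy_Backward set, SSSE3 present.  */
  puts (select_memcpy (0, 0, 1, 1));
  return 0;
}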