x86: Adding an upper bound for Enhanced REP MOVSB.

Message ID 20210122101850.3028846-1-sajan.karumanchi@amd.com
State Committed
Commit 6e02b3e9327b7dbb063958d2b124b64fcb4bbe3f
Series x86: Adding an upper bound for Enhanced REP MOVSB.

Commit Message

Sajan Karumanchi via Libc-alpha, Jan. 22, 2021, 10:18 a.m. UTC
  From: Sajan Karumanchi <sajan.karumanchi@amd.com>

While optimizing memcpy for AMD machines, we found that vector move
operations outperform Enhanced REP MOVSB for data transfers above the
L2 cache size on Zen3 architectures.
To handle this case, we add an upper bound parameter for Enhanced REP
MOVSB: '__x86_rep_movsb_stop_threshold'.
Based on large-bench results, we set this parameter to the L2 cache
size on AMD machines; it applies from the Zen3 architecture onward,
the first AMD architecture to support the ERMS feature.
For architectures other than AMD, it is set to the computed value of
the non-temporal threshold parameter.

Reviewed-by: Premachandra Mallappa <premachandra.mallappa@amd.com>
---
 sysdeps/x86/cacheinfo.h                           |  4 ++++
 sysdeps/x86/dl-cacheinfo.h                        | 15 ++++++++++++++-
 sysdeps/x86/include/cpu-features.h                |  2 ++
 .../x86_64/multiarch/memmove-vec-unaligned-erms.S |  7 +++++--
 4 files changed, 25 insertions(+), 3 deletions(-)
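
To make the new behavior concrete, below is a minimal, hypothetical C
sketch of the size dispatch this patch produces.  The real
implementation is the assembly in memmove-vec-unaligned-erms.S; the
helper functions and threshold values here are stand-ins (glibc
computes the real thresholds at startup in dl_init_cacheinfo):

  #include <stddef.h>

  /* Stand-ins for the tunables this patch wires up; glibc derives the
     real values from CPUID.  */
  static long int rep_movsb_threshold = 2048;
  static long int rep_movsb_stop_threshold = 512 * 1024; /* e.g. L2 size */

  /* Simplified stand-in for the SSE/AVX vector-move paths.  */
  static void
  copy_vector (char *dst, const char *src, size_t n)
  {
    for (size_t i = 0; i < n; i++)
      dst[i] = src[i];
  }

  /* REP MOVSB, as issued on ERMS-capable CPUs.  */
  static void
  copy_rep_movsb (char *dst, const char *src, size_t n)
  {
    __asm__ volatile ("rep movsb"
                      : "+D" (dst), "+S" (src), "+c" (n)
                      : : "memory");
  }

  void *
  sketch_memcpy (void *dst, const void *src, size_t n)
  {
    /* Use REP MOVSB only inside the [threshold, stop_threshold)
       window; everything else goes through the vector paths.  */
    if ((long int) n >= rep_movsb_threshold
        && (long int) n < rep_movsb_stop_threshold)
      copy_rep_movsb (dst, src, n);
    else
      copy_vector (dst, src, n);
    return dst;
  }

On AMD Zen3 this confines REP MOVSB to sizes below roughly the L2
cache size; on other vendors the window extends to the non-temporal
threshold, preserving the previous behavior.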
  

Comments

H.J. Lu Feb. 1, 2021, 5:05 p.m. UTC | #1
On Fri, Jan 22, 2021 at 2:19 AM <sajan.karumanchi@amd.com> wrote:
>
> From: Sajan Karumanchi <sajan.karumanchi@amd.com>
>
> While optimizing memcpy for AMD machines, we found that vector move
> operations outperform Enhanced REP MOVSB for data transfers above the
> L2 cache size on Zen3 architectures.
> To handle this case, we add an upper bound parameter for Enhanced REP
> MOVSB: '__x86_rep_movsb_stop_threshold'.
> Based on large-bench results, we set this parameter to the L2 cache
> size on AMD machines; it applies from the Zen3 architecture onward,
> the first AMD architecture to support the ERMS feature.
> For architectures other than AMD, it is set to the computed value of
> the non-temporal threshold parameter.
>
> Reviewed-by: Premachandra Mallappa <premachandra.mallappa@amd.com>
> ---
>  sysdeps/x86/cacheinfo.h                           |  4 ++++
>  sysdeps/x86/dl-cacheinfo.h                        | 15 ++++++++++++++-
>  sysdeps/x86/include/cpu-features.h                |  2 ++
>  .../x86_64/multiarch/memmove-vec-unaligned-erms.S |  7 +++++--
>  4 files changed, 25 insertions(+), 3 deletions(-)
>
> diff --git a/sysdeps/x86/cacheinfo.h b/sysdeps/x86/cacheinfo.h
> index 68c253542f..0f0ca7c08c 100644
> --- a/sysdeps/x86/cacheinfo.h
> +++ b/sysdeps/x86/cacheinfo.h
> @@ -54,6 +54,9 @@ long int __x86_rep_movsb_threshold attribute_hidden = 2048;
>  /* Threshold to use Enhanced REP STOSB.  */
>  long int __x86_rep_stosb_threshold attribute_hidden = 2048;
>
> +/* Threshold to stop using Enhanced REP MOVSB.  */
> +long int __x86_rep_movsb_stop_threshold attribute_hidden;
> +
>  static void
>  init_cacheinfo (void)
>  {
> @@ -79,5 +82,6 @@ init_cacheinfo (void)
>
>    __x86_rep_movsb_threshold = cpu_features->rep_movsb_threshold;
>    __x86_rep_stosb_threshold = cpu_features->rep_stosb_threshold;
> +  __x86_rep_movsb_stop_threshold = cpu_features->rep_movsb_stop_threshold;
>  }
>  #endif
> diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h
> index a31fa0783a..374ba82467 100644
> --- a/sysdeps/x86/dl-cacheinfo.h
> +++ b/sysdeps/x86/dl-cacheinfo.h
> @@ -704,7 +704,7 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
>    int max_cpuid_ex;
>    long int data = -1;
>    long int shared = -1;
> -  long int core;
> +  long int core = -1;
>    unsigned int threads = 0;
>    unsigned long int level1_icache_size = -1;
>    unsigned long int level1_dcache_size = -1;
> @@ -886,6 +886,18 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
>  #endif
>      }
>
> +  unsigned long int rep_movsb_stop_threshold;
> +  /* The ERMS feature is implemented from AMD Zen3 onward and performs
> +     poorly for data above the L2 cache size.  Hence, add an upper
> +     bound threshold parameter to limit the use of Enhanced REP MOVSB
> +     and set its value to the L2 cache size.  */
> +  if (cpu_features->basic.kind == arch_kind_amd)
> +    rep_movsb_stop_threshold = core;
> +  /* Set the upper bound of ERMS to the computed value of the
> +     non-temporal threshold on architectures other than AMD.  */
> +  else
> +    rep_movsb_stop_threshold = non_temporal_threshold;
> +
>    /* The default threshold to use Enhanced REP STOSB.  */
>    unsigned long int rep_stosb_threshold = 2048;
>
> @@ -935,4 +947,5 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
>    cpu_features->non_temporal_threshold = non_temporal_threshold;
>    cpu_features->rep_movsb_threshold = rep_movsb_threshold;
>    cpu_features->rep_stosb_threshold = rep_stosb_threshold;
> +  cpu_features->rep_movsb_stop_threshold = rep_movsb_stop_threshold;
>  }
> diff --git a/sysdeps/x86/include/cpu-features.h b/sysdeps/x86/include/cpu-features.h
> index 624736b40e..475e877294 100644
> --- a/sysdeps/x86/include/cpu-features.h
> +++ b/sysdeps/x86/include/cpu-features.h
> @@ -870,6 +870,8 @@ struct cpu_features
>    unsigned long int non_temporal_threshold;
>    /* Threshold to use "rep movsb".  */
>    unsigned long int rep_movsb_threshold;
> +  /* Threshold to stop using "rep movsb".  */
> +  unsigned long int rep_movsb_stop_threshold;
>    /* Threshold to use "rep stosb".  */
>    unsigned long int rep_stosb_threshold;
>    /* _SC_LEVEL1_ICACHE_SIZE.  */
> diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
> index 0980c95378..50bb1fccb2 100644
> --- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
> @@ -30,7 +30,10 @@
>        load and aligned store.  Load the last 4 * VEC and first VEC
>        before the loop and store them after the loop to support
>        overlapping addresses.
> -   6. If size >= __x86_shared_non_temporal_threshold and there is no
> +   6. On machines with the ERMS feature, if size is greater than or
> +      equal to __x86_rep_movsb_threshold and less than
> +      __x86_rep_movsb_stop_threshold, then REP MOVSB will be used.
> +   7. If size >= __x86_shared_non_temporal_threshold and there is no
>        overlap between destination and source, use non-temporal store
>        instead of aligned store.  */
>
> @@ -240,7 +243,7 @@ L(return):
>         ret
>
>  L(movsb):
> -       cmp     __x86_shared_non_temporal_threshold(%rip), %RDX_LP
> +       cmp     __x86_rep_movsb_stop_threshold(%rip), %RDX_LP
>         jae     L(more_8x_vec)
>         cmpq    %rsi, %rdi
>         jb      1f
> --
> 2.25.1
>

LGTM.   OK for 2.34.

Thanks.
  
Sunil Pandey April 27, 2022, 11:38 p.m. UTC | #2
On Mon, Feb 1, 2021 at 9:13 AM H.J. Lu via Libc-alpha
<libc-alpha@sourceware.org> wrote:
>
> On Fri, Jan 22, 2021 at 2:19 AM <sajan.karumanchi@amd.com> wrote:
> >
> > From: Sajan Karumanchi <sajan.karumanchi@amd.com>
> >
> > While optimizing memcpy for AMD machines, we found that vector move
> > operations outperform Enhanced REP MOVSB for data transfers above the
> > L2 cache size on Zen3 architectures.
> > To handle this case, we add an upper bound parameter for Enhanced REP
> > MOVSB: '__x86_rep_movsb_stop_threshold'.
> > Based on large-bench results, we set this parameter to the L2 cache
> > size on AMD machines; it applies from the Zen3 architecture onward,
> > the first AMD architecture to support the ERMS feature.
> > For architectures other than AMD, it is set to the computed value of
> > the non-temporal threshold parameter.
> >
> > Reviewed-by: Premachandra Mallappa <premachandra.mallappa@amd.com>
> > [patch snipped]
>
> LGTM.   OK for 2.34.
>
> Thanks.
>
> --
> H.J.

I would like to backport this patch to release branches.
Any comments or objections?

--Sunil
  

Patch

diff --git a/sysdeps/x86/cacheinfo.h b/sysdeps/x86/cacheinfo.h
index 68c253542f..0f0ca7c08c 100644
--- a/sysdeps/x86/cacheinfo.h
+++ b/sysdeps/x86/cacheinfo.h
@@ -54,6 +54,9 @@  long int __x86_rep_movsb_threshold attribute_hidden = 2048;
 /* Threshold to use Enhanced REP STOSB.  */
 long int __x86_rep_stosb_threshold attribute_hidden = 2048;
 
+/* Threshold to stop using Enhanced REP MOVSB.  */
+long int __x86_rep_movsb_stop_threshold attribute_hidden;
+
 static void
 init_cacheinfo (void)
 {
@@ -79,5 +82,6 @@  init_cacheinfo (void)
 
   __x86_rep_movsb_threshold = cpu_features->rep_movsb_threshold;
   __x86_rep_stosb_threshold = cpu_features->rep_stosb_threshold;
+  __x86_rep_movsb_stop_threshold = cpu_features->rep_movsb_stop_threshold;
 }
 #endif
diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h
index a31fa0783a..374ba82467 100644
--- a/sysdeps/x86/dl-cacheinfo.h
+++ b/sysdeps/x86/dl-cacheinfo.h
@@ -704,7 +704,7 @@  dl_init_cacheinfo (struct cpu_features *cpu_features)
   int max_cpuid_ex;
   long int data = -1;
   long int shared = -1;
-  long int core;
+  long int core = -1;
   unsigned int threads = 0;
   unsigned long int level1_icache_size = -1;
   unsigned long int level1_dcache_size = -1;
@@ -886,6 +886,18 @@  dl_init_cacheinfo (struct cpu_features *cpu_features)
 #endif
     }
 
+  unsigned long int rep_movsb_stop_threshold;
+  /* The ERMS feature is implemented from AMD Zen3 onward and performs
+     poorly for data above the L2 cache size.  Hence, add an upper
+     bound threshold parameter to limit the use of Enhanced REP MOVSB
+     and set its value to the L2 cache size.  */
+  if (cpu_features->basic.kind == arch_kind_amd)
+    rep_movsb_stop_threshold = core;
+  /* Set the upper bound of ERMS to the computed value of the
+     non-temporal threshold on architectures other than AMD.  */
+  else
+    rep_movsb_stop_threshold = non_temporal_threshold;
+
   /* The default threshold to use Enhanced REP STOSB.  */
   unsigned long int rep_stosb_threshold = 2048;
 
@@ -935,4 +947,5 @@  dl_init_cacheinfo (struct cpu_features *cpu_features)
   cpu_features->non_temporal_threshold = non_temporal_threshold;
   cpu_features->rep_movsb_threshold = rep_movsb_threshold;
   cpu_features->rep_stosb_threshold = rep_stosb_threshold;
+  cpu_features->rep_movsb_stop_threshold = rep_movsb_stop_threshold;
 }
diff --git a/sysdeps/x86/include/cpu-features.h b/sysdeps/x86/include/cpu-features.h
index 624736b40e..475e877294 100644
--- a/sysdeps/x86/include/cpu-features.h
+++ b/sysdeps/x86/include/cpu-features.h
@@ -870,6 +870,8 @@  struct cpu_features
   unsigned long int non_temporal_threshold;
   /* Threshold to use "rep movsb".  */
   unsigned long int rep_movsb_threshold;
+  /* Threshold to stop using "rep movsb".  */
+  unsigned long int rep_movsb_stop_threshold;
   /* Threshold to use "rep stosb".  */
   unsigned long int rep_stosb_threshold;
   /* _SC_LEVEL1_ICACHE_SIZE.  */
diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
index 0980c95378..50bb1fccb2 100644
--- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
@@ -30,7 +30,10 @@ 
       load and aligned store.  Load the last 4 * VEC and first VEC
       before the loop and store them after the loop to support
       overlapping addresses.
-   6. If size >= __x86_shared_non_temporal_threshold and there is no
+   6. On machines with the ERMS feature, if size is greater than or
+      equal to __x86_rep_movsb_threshold and less than
+      __x86_rep_movsb_stop_threshold, then REP MOVSB will be used.
+   7. If size >= __x86_shared_non_temporal_threshold and there is no
       overlap between destination and source, use non-temporal store
       instead of aligned store.  */
 
@@ -240,7 +243,7 @@  L(return):
 	ret
 
 L(movsb):
-	cmp	__x86_shared_non_temporal_threshold(%rip), %RDX_LP
+	cmp	__x86_rep_movsb_stop_threshold(%rip), %RDX_LP
 	jae	L(more_8x_vec)
 	cmpq	%rsi, %rdi
 	jb	1f
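
For comparison across vendors, the selection logic added in
dl-cacheinfo.h reduces to the self-contained C sketch below.  The
arch_kind enum and the sizes in main are simplified stand-ins for
what glibc detects via CPUID:

  #include <stdio.h>

  enum arch_kind { arch_kind_amd, arch_kind_other };

  /* Simplified model of the threshold selection above.  'core' is the
     detected L2 cache size; 'non_temporal_threshold' is computed from
     the shared cache size.  */
  static unsigned long int
  stop_threshold (enum arch_kind kind, long int core,
                  unsigned long int non_temporal_threshold)
  {
    /* AMD implements ERMS from Zen3 onward, and REP MOVSB performs
       poorly above the L2 cache size there, so cap it at L2.  */
    if (kind == arch_kind_amd)
      return core;
    /* Elsewhere keep the previous upper bound, the non-temporal
       threshold.  */
    return non_temporal_threshold;
  }

  int
  main (void)
  {
    /* Placeholder sizes: 512 KiB L2, 3 MiB non-temporal threshold.  */
    printf ("AMD stop threshold:   %lu\n",
            stop_threshold (arch_kind_amd, 512 * 1024, 3UL << 20));
    printf ("other stop threshold: %lu\n",
            stop_threshold (arch_kind_other, 512 * 1024, 3UL << 20));
    return 0;
  }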