[2/2] x86: Add thresholds for "rep movsb/stosb" to tunables
Commit Message
Add x86_rep_movsb_threshold and x86_rep_stosb_threshold to tunables
to update thresholds for "rep movsb" and "rep stosb" at run-time.
Note that a user-specified threshold for "rep movsb" smaller than the
minimum threshold will be ignored.
---
manual/tunables.texi | 14 +++++++
sysdeps/x86/cacheinfo.c | 20 ++++++++++
sysdeps/x86/cpu-features.h | 4 ++
sysdeps/x86/dl-cacheinfo.c | 38 +++++++++++++++++++
sysdeps/x86/dl-tunables.list | 6 +++
.../multiarch/memmove-vec-unaligned-erms.S | 16 +-------
.../multiarch/memset-vec-unaligned-erms.S | 12 +-----
7 files changed, 84 insertions(+), 26 deletions(-)
Comments
On 7/3/20 1:52 PM, H.J. Lu wrote:
> Add x86_rep_movsb_threshold and x86_rep_stosb_threshold to tunables
> to update thresholds for "rep movsb" and "rep stosb" at run-time.
>
> Note that a user-specified threshold for "rep movsb" smaller than the
> minimum threshold will be ignored.
Post v2 please. Almost there.
> ---
> manual/tunables.texi | 14 +++++++
> sysdeps/x86/cacheinfo.c | 20 ++++++++++
> sysdeps/x86/cpu-features.h | 4 ++
> sysdeps/x86/dl-cacheinfo.c | 38 +++++++++++++++++++
> sysdeps/x86/dl-tunables.list | 6 +++
> .../multiarch/memmove-vec-unaligned-erms.S | 16 +-------
> .../multiarch/memset-vec-unaligned-erms.S | 12 +-----
> 7 files changed, 84 insertions(+), 26 deletions(-)
>
> diff --git a/manual/tunables.texi b/manual/tunables.texi
> index ec18b10834..61edd62425 100644
> --- a/manual/tunables.texi
> +++ b/manual/tunables.texi
> @@ -396,6 +396,20 @@ to set threshold in bytes for non temporal store.
> This tunable is specific to i386 and x86-64.
> @end deftp
>
> +@deftp Tunable glibc.cpu.x86_rep_movsb_threshold
> +The @code{glibc.cpu.x86_rep_movsb_threshold} tunable allows the user
> +to set threshold in bytes to start using "rep movsb".
> +
> +This tunable is specific to i386 and x86-64.
> +@end deftp
> +
> +@deftp Tunable glibc.cpu.x86_rep_stosb_threshold
> +The @code{glibc.cpu.x86_rep_stosb_threshold} tunable allows the user
> +to set threshold in bytes to start using "rep stosb".
> +
> +This tunable is specific to i386 and x86-64.
> +@end deftp
> +
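For reference, these can then be set for a single run via the tunables
environment variable, e.g.:

  GLIBC_TUNABLES=glibc.cpu.x86_rep_movsb_threshold=4096:glibc.cpu.x86_rep_stosb_threshold=4096 ./app

(./app is a placeholder for whatever binary is being tested.)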
> @deftp Tunable glibc.cpu.x86_ibt
> The @code{glibc.cpu.x86_ibt} tunable allows the user to control how
> indirect branch tracking (IBT) should be enabled. Accepted values are
> diff --git a/sysdeps/x86/cacheinfo.c b/sysdeps/x86/cacheinfo.c
> index 8c4c7f9972..bb536d96ef 100644
> --- a/sysdeps/x86/cacheinfo.c
> +++ b/sysdeps/x86/cacheinfo.c
> @@ -41,6 +41,23 @@ long int __x86_raw_shared_cache_size attribute_hidden = 1024 * 1024;
> /* Threshold to use non temporal store. */
> long int __x86_shared_non_temporal_threshold attribute_hidden;
>
> +/* Threshold to use Enhanced REP MOVSB. Since there is overhead to set
> + up REP MOVSB operation, REP MOVSB isn't faster on short data. The
> + memcpy micro benchmark in glibc shows that 2KB is the approximate
> + value above which REP MOVSB becomes faster than SSE2 optimization
> + on processors with Enhanced REP MOVSB. Since larger register size
> + can move more data with a single load and store, the threshold is
> + higher with larger register size. */
> +long int __x86_rep_movsb_threshold attribute_hidden = 2048;
> +
> +/* Threshold to use Enhanced REP STOSB. Since there is overhead to set
> + up REP STOSB operation, REP STOSB isn't faster on short data. The
> + memset micro benchmark in glibc shows that 2KB is the approximate
> + value above which REP STOSB becomes faster on processors with
> + Enhanced REP STOSB. Since the stored value is fixed, larger register
> + size has minimal impact on threshold. */
> +long int __x86_rep_stosb_threshold attribute_hidden = 2048;
> +
> #ifndef __x86_64__
> /* PREFETCHW support flag for use in memory and string routines. */
> int __x86_prefetchw attribute_hidden;
> @@ -117,6 +134,9 @@ init_cacheinfo (void)
> __x86_shared_non_temporal_threshold
> = cpu_features->non_temporal_threshold;
>
> + __x86_rep_movsb_threshold = cpu_features->rep_movsb_threshold;
> + __x86_rep_stosb_threshold = cpu_features->rep_stosb_threshold;
> +
OK. Update the globals from the cpu_features values.
I would really like to see some kind of "assert (cpu_features->initialized);"
so that we know we didn't break the startup sequence unintentionally.
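Something along these lines (a sketch only; it assumes an "initialized"
flag is added to struct cpu_features and set at the end of
init_cpu_features, which this patch does not do):

  /* Hypothetical: verify CPU feature detection ran before we copy
     the computed thresholds into the globals.  */
  assert (cpu_features->initialized);
  __x86_rep_movsb_threshold = cpu_features->rep_movsb_threshold;
  __x86_rep_stosb_threshold = cpu_features->rep_stosb_threshold;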
> #ifndef __x86_64__
> __x86_prefetchw = cpu_features->prefetchw;
> #endif
> diff --git a/sysdeps/x86/cpu-features.h b/sysdeps/x86/cpu-features.h
> index 3aaed33cbc..002e12e11f 100644
> --- a/sysdeps/x86/cpu-features.h
> +++ b/sysdeps/x86/cpu-features.h
> @@ -128,6 +128,10 @@ struct cpu_features
> /* PREFETCHW support flag for use in memory and string routines. */
> unsigned long int prefetchw;
> #endif
> + /* Threshold to use "rep movsb". */
> + unsigned long int rep_movsb_threshold;
> + /* Threshold to use "rep stosb". */
> + unsigned long int rep_stosb_threshold;
OK.
> };
>
> /* Used from outside of glibc to get access to the CPU features
> diff --git a/sysdeps/x86/dl-cacheinfo.c b/sysdeps/x86/dl-cacheinfo.c
> index 8e2a6f552c..aff9bd1067 100644
> --- a/sysdeps/x86/dl-cacheinfo.c
> +++ b/sysdeps/x86/dl-cacheinfo.c
> @@ -860,6 +860,31 @@ __init_cacheinfo (void)
> total shared cache size. */
> unsigned long int non_temporal_threshold = (shared * threads * 3 / 4);
>
> + /* NB: The REP MOVSB threshold must be greater than VEC_SIZE * 8. */
> + unsigned long int minimum_rep_movsb_threshold;
> + /* NB: The default REP MOVSB threshold is 2048 * (VEC_SIZE / 16). See
> + comments for __x86_rep_movsb_threshold in cacheinfo.c. */
> + unsigned long int rep_movsb_threshold;
> + if (CPU_FEATURES_ARCH_P (cpu_features, AVX512F_Usable)
> + && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
> + {
> + rep_movsb_threshold = 2048 * (64 / 16);
> + minimum_rep_movsb_threshold = 64 * 8;
> + }
> + else if (CPU_FEATURES_ARCH_P (cpu_features,
> + AVX_Fast_Unaligned_Load))
> + {
> + rep_movsb_threshold = 2048 * (32 / 16);
> + minimum_rep_movsb_threshold = 32 * 8;
> + }
> + else
> + {
> + rep_movsb_threshold = 2048 * (16 / 16);
> + minimum_rep_movsb_threshold = 16 * 8;
> + }
> + /* NB: See comments for __x86_rep_stosb_threshold in cacheinfo.c. */
> + unsigned long int rep_stosb_threshold = 2048;
> +
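For reference, plugging the three vector sizes into these formulas
(default = 2048 * (VEC_SIZE / 16), minimum = VEC_SIZE * 8) gives:

  VEC_SIZE 64 (AVX512):  default 8192, minimum 512
  VEC_SIZE 32 (AVX):     default 4096, minimum 256
  VEC_SIZE 16 (SSE2):    default 2048, minimum 128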
> #if HAVE_TUNABLES
> long int tunable_size;
> tunable_size = TUNABLE_GET (x86_data_cache_size, long int, NULL);
> @@ -871,11 +896,19 @@ __init_cacheinfo (void)
> tunable_size = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
> if (tunable_size != 0)
> non_temporal_threshold = tunable_size;
> + tunable_size = TUNABLE_GET (x86_rep_movsb_threshold, long int, NULL);
> + if (tunable_size > minimum_rep_movsb_threshold)
> + rep_movsb_threshold = tunable_size;
OK. Good, we only set rep_movsb_threshold if it's greater than the minimum;
smaller user-specified values are silently ignored, matching the commit message.
> + tunable_size = TUNABLE_GET (x86_rep_stosb_threshold, long int, NULL);
> + if (tunable_size != 0)
> + rep_stosb_threshold = tunable_size;
This should be minval=1, default=2048 in dl-tunables.list, which would
remove this code since the range is not dynamic.
The point of the tunables framework is to remove such boilerplate for
range and default processing, and for clearing parameters for security
settings.
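Concretely, something like this (a sketch; dl-tunables.list uses the
minval/default keys):

    x86_rep_stosb_threshold {
      type: SIZE_T
      minval: 1
      default: 2048
    }

With that, TUNABLE_GET always returns either the default or a validated
user value >= 1, and the if-check above becomes unnecessary.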
> #endif
>
> cpu_features->data_cache_size = data;
> cpu_features->shared_cache_size = shared;
> cpu_features->non_temporal_threshold = non_temporal_threshold;
> + cpu_features->rep_movsb_threshold = rep_movsb_threshold;
> + cpu_features->rep_stosb_threshold = rep_stosb_threshold;
>
> #if HAVE_TUNABLES
> TUNABLE_UPDATE (x86_data_cache_size, long int,
> @@ -884,5 +917,10 @@ __init_cacheinfo (void)
> shared, 0, (long int) -1);
> TUNABLE_UPDATE (x86_non_temporal_threshold, long int,
> non_temporal_threshold, 0, (long int) -1);
> + TUNABLE_UPDATE (x86_rep_movsb_threshold, long int,
> + rep_movsb_threshold, minimum_rep_movsb_threshold,
> + (long int) -1);
OK. Store the new value and the computed minimum.
> + TUNABLE_UPDATE (x86_rep_stosb_threshold, long int,
> + rep_stosb_threshold, 0, (long int) -1);
This one can be deleted.
> #endif
> }
> diff --git a/sysdeps/x86/dl-tunables.list b/sysdeps/x86/dl-tunables.list
> index 251b926ce4..43bf6c2389 100644
> --- a/sysdeps/x86/dl-tunables.list
> +++ b/sysdeps/x86/dl-tunables.list
> @@ -30,6 +30,12 @@ glibc {
> x86_non_temporal_threshold {
> type: SIZE_T
> }
> + x86_rep_movsb_threshold {
> + type: SIZE_T
> + }
> + x86_rep_stosb_threshold {
> + type: SIZE_T
minval: 1
default: 2048
> + }
> x86_data_cache_size {
> type: SIZE_T
> }
> diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
> index 74953245aa..bd5dc1a3f3 100644
> --- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
> @@ -56,17 +56,6 @@
> # endif
> #endif
>
> -/* Threshold to use Enhanced REP MOVSB. Since there is overhead to set
> - up REP MOVSB operation, REP MOVSB isn't faster on short data. The
> - memcpy micro benchmark in glibc shows that 2KB is the approximate
> - value above which REP MOVSB becomes faster than SSE2 optimization
> - on processors with Enhanced REP MOVSB. Since larger register size
> - can move more data with a single load and store, the threshold is
> - higher with larger register size. */
> -#ifndef REP_MOVSB_THRESHOLD
> -#ifndef REP_MOVSB_THRESHOLD
> -# define REP_MOVSB_THRESHOLD (2048 * (VEC_SIZE / 16))
> -#endif
OK.
> -
> #ifndef PREFETCH
> # define PREFETCH(addr) prefetcht0 addr
> #endif
> @@ -253,9 +242,6 @@ L(movsb):
> leaq (%rsi,%rdx), %r9
> cmpq %r9, %rdi
> /* Avoid slow backward REP MOVSB. */
> -# if REP_MOVSB_THRESHOLD <= (VEC_SIZE * 8)
> -# error Unsupported REP_MOVSB_THRESHOLD and VEC_SIZE!
> -# endif
OK. The compile-time check is now enforced at run time via
minimum_rep_movsb_threshold in __init_cacheinfo.
> jb L(more_8x_vec_backward)
> 1:
> mov %RDX_LP, %RCX_LP
> @@ -331,7 +317,7 @@ L(between_2_3):
>
> #if defined USE_MULTIARCH && IS_IN (libc)
> L(movsb_more_2x_vec):
> - cmpq $REP_MOVSB_THRESHOLD, %rdx
> + cmp __x86_rep_movsb_threshold(%rip), %RDX_LP
OK.
> ja L(movsb)
> #endif
> L(more_2x_vec):
> diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> index af2299709c..2bfc95de05 100644
> --- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> @@ -58,16 +58,6 @@
> # endif
> #endif
>
> -/* Threshold to use Enhanced REP STOSB. Since there is overhead to set
> - up REP STOSB operation, REP STOSB isn't faster on short data. The
> - memset micro benchmark in glibc shows that 2KB is the approximate
> - value above which REP STOSB becomes faster on processors with
> - Enhanced REP STOSB. Since the stored value is fixed, larger register
> - size has minimal impact on threshold. */
> -#ifndef REP_STOSB_THRESHOLD
> -# define REP_STOSB_THRESHOLD 2048
> -#endif
> -
> #ifndef SECTION
> # error SECTION is not defined!
> #endif
> @@ -181,7 +171,7 @@ ENTRY (MEMSET_SYMBOL (__memset, unaligned_erms))
> ret
>
> L(stosb_more_2x_vec):
> - cmpq $REP_STOSB_THRESHOLD, %rdx
> + cmp __x86_rep_stosb_threshold(%rip), %RDX_LP
OK.
> ja L(stosb)
> #endif
> L(more_2x_vec):
>