[v2] x86: Add new cpu-flag `Prefer_Non_Temporal`
Checks
Context |
Check |
Description |
redhat-pt-bot/TryBot-apply_patch |
success
|
Patch applied to master at the time it was sent
|
redhat-pt-bot/TryBot-32bit |
success
|
Build for i686
|
linaro-tcwg-bot/tcwg_glibc_build--master-arm |
success
|
Build passed
|
linaro-tcwg-bot/tcwg_glibc_check--master-arm |
success
|
Test passed
|
linaro-tcwg-bot/tcwg_glibc_build--master-aarch64 |
success
|
Build passed
|
linaro-tcwg-bot/tcwg_glibc_check--master-aarch64 |
success
|
Test passed
|
Commit Message
The goal of this flag is to allow targets which don't prefer/have ERMS
to still access the non-temporal memset implementation.
---
sysdeps/x86/cpu-tunables.c | 2 ++
sysdeps/x86/dl-cacheinfo.h | 32 ++++++++++++++++---
...cpu-features-preferred_feature_index_1.def | 1 +
sysdeps/x86/tst-hwcap-tunables.c | 6 ++--
sysdeps/x86_64/multiarch/ifunc-memset.h | 15 ++++++---
5 files changed, 45 insertions(+), 11 deletions(-)
Comments
On Sat, Aug 10, 2024 at 10:56 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
>
> The goal of this flag is to allow targets which don't prefer/have ERMS
> to still access the non-temporal memset implementation.
> ---
> sysdeps/x86/cpu-tunables.c | 2 ++
> sysdeps/x86/dl-cacheinfo.h | 32 ++++++++++++++++---
> ...cpu-features-preferred_feature_index_1.def | 1 +
> sysdeps/x86/tst-hwcap-tunables.c | 6 ++--
> sysdeps/x86_64/multiarch/ifunc-memset.h | 15 ++++++---
> 5 files changed, 45 insertions(+), 11 deletions(-)
>
> diff --git a/sysdeps/x86/cpu-tunables.c b/sysdeps/x86/cpu-tunables.c
> index ccc6b64dc2..789646ba26 100644
> --- a/sysdeps/x86/cpu-tunables.c
> +++ b/sysdeps/x86/cpu-tunables.c
> @@ -223,6 +223,8 @@ TUNABLE_CALLBACK (set_hwcaps) (tunable_val_t *valp)
> Fast_Unaligned_Load, 19);
> CHECK_GLIBC_IFUNC_PREFERRED_BOTH (n, cpu_features,
> Fast_Unaligned_Copy, 19);
> + CHECK_GLIBC_IFUNC_PREFERRED_BOTH (n, cpu_features,
> + Prefer_Non_Temporal, 19);
> }
> break;
> case 20:
> diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h
> index a1c03b8903..b8ba0c098d 100644
> --- a/sysdeps/x86/dl-cacheinfo.h
> +++ b/sysdeps/x86/dl-cacheinfo.h
> @@ -992,7 +992,8 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
> non_temporal_threshold. Enable this for both Intel and AMD hardware. */
> unsigned long int memset_non_temporal_threshold = SIZE_MAX;
> if (!CPU_FEATURES_ARCH_P (cpu_features, Avoid_Non_Temporal_Memset)
> - && (cpu_features->basic.kind == arch_kind_intel
> + && (CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal)
> + || cpu_features->basic.kind == arch_kind_intel
Please set Prefer_Non_Temporal for Intel and AMD. There is no
need to check arch_kind_intel nor arch_kind_amd.
> || cpu_features->basic.kind == arch_kind_amd))
> memset_non_temporal_threshold = non_temporal_threshold;
>
> @@ -1042,14 +1043,37 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
> slightly better than ERMS. */
> rep_stosb_threshold = SIZE_MAX;
>
> + /*
> + For memset, the non-temporal implementation is only accessed through the
> + stosb code. ie:
> + ```
> + if (size >= rep_stosb_thresh)
> + {
> + if (size >= non_temporal_thresh)
> + {
> + do_non_temporal ();
> + }
> + do_stosb ();
> + }
> + do_normal_vec_loop ();
> + ```
> + So if we prefer non-temporal, set `rep_stosb_thresh = non_temporal_thresh`
> + to enable the implementation. If `rep_stosb_thresh = non_temporal_thresh`,
> + `rep stosb` will never be used.
> + */
> + TUNABLE_SET_WITH_BOUNDS (x86_memset_non_temporal_threshold,
> + memset_non_temporal_threshold,
> + minimum_non_temporal_threshold, SIZE_MAX);
> + if (CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
> + rep_stosb_threshold
> + = TUNABLE_GET (x86_memset_non_temporal_threshold, long int, NULL);
> +
> +
> TUNABLE_SET_WITH_BOUNDS (x86_data_cache_size, data, 0, SIZE_MAX);
> TUNABLE_SET_WITH_BOUNDS (x86_shared_cache_size, shared, 0, SIZE_MAX);
> TUNABLE_SET_WITH_BOUNDS (x86_non_temporal_threshold, non_temporal_threshold,
> minimum_non_temporal_threshold,
> maximum_non_temporal_threshold);
> - TUNABLE_SET_WITH_BOUNDS (x86_memset_non_temporal_threshold,
> - memset_non_temporal_threshold,
> - minimum_non_temporal_threshold, SIZE_MAX);
> TUNABLE_SET_WITH_BOUNDS (x86_rep_movsb_threshold, rep_movsb_threshold,
> minimum_rep_movsb_threshold, SIZE_MAX);
> TUNABLE_SET_WITH_BOUNDS (x86_rep_stosb_threshold, rep_stosb_threshold, 1,
> diff --git a/sysdeps/x86/include/cpu-features-preferred_feature_index_1.def b/sysdeps/x86/include/cpu-features-preferred_feature_index_1.def
> index 61bbbc2e89..f15344a8be 100644
> --- a/sysdeps/x86/include/cpu-features-preferred_feature_index_1.def
> +++ b/sysdeps/x86/include/cpu-features-preferred_feature_index_1.def
> @@ -34,3 +34,4 @@ BIT (MathVec_Prefer_No_AVX512)
> BIT (Prefer_FSRM)
> BIT (Avoid_Short_Distance_REP_MOVSB)
> BIT (Avoid_Non_Temporal_Memset)
> +BIT (Prefer_Non_Temporal)
> \ No newline at end of file
> diff --git a/sysdeps/x86/tst-hwcap-tunables.c b/sysdeps/x86/tst-hwcap-tunables.c
> index 94307283d7..b33e0e92f4 100644
> --- a/sysdeps/x86/tst-hwcap-tunables.c
> +++ b/sysdeps/x86/tst-hwcap-tunables.c
> @@ -60,7 +60,8 @@ static const struct test_t
> /* Disable everything. */
> "-Prefer_ERMS,-Prefer_FSRM,-AVX,-AVX2,-AVX512F,-AVX512VL,"
> "-SSE4_1,-SSE4_2,-SSSE3,-Fast_Unaligned_Load,-ERMS,"
> - "-AVX_Fast_Unaligned_Load,-Avoid_Non_Temporal_Memset",
> + "-AVX_Fast_Unaligned_Load,-Avoid_Non_Temporal_Memset,"
> + "-Prefer_Non_Temporal",
> test_1,
> array_length (test_1)
> },
> @@ -68,7 +69,8 @@ static const struct test_t
> /* Same as before, but with some empty suboptions. */
> ",-,-Prefer_ERMS,-Prefer_FSRM,-AVX,-AVX2,-AVX512F,-AVX512VL,"
> "-SSE4_1,-SSE4_2,-SSSE3,-Fast_Unaligned_Load,,-,"
> - "-ERMS,-AVX_Fast_Unaligned_Load,-Avoid_Non_Temporal_Memset,-,",
> + "-ERMS,-AVX_Fast_Unaligned_Load,-Avoid_Non_Temporal_Memset,"
> + "-Prefer_Non_Temporal,-,",
> test_1,
> array_length (test_1)
> }
> diff --git a/sysdeps/x86_64/multiarch/ifunc-memset.h b/sysdeps/x86_64/multiarch/ifunc-memset.h
> index 7a637ef7ca..27f04e9dce 100644
> --- a/sysdeps/x86_64/multiarch/ifunc-memset.h
> +++ b/sysdeps/x86_64/multiarch/ifunc-memset.h
> @@ -61,7 +61,8 @@ IFUNC_SELECTOR (void)
> && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, BMI2))
> {
> - if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> + if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)
> + || CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
> return OPTIMIZE (avx512_unaligned_erms);
>
> return OPTIMIZE (avx512_unaligned);
> @@ -76,7 +77,8 @@ IFUNC_SELECTOR (void)
> && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, BMI2))
> {
> - if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> + if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)
> + || CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
> return OPTIMIZE (evex_unaligned_erms);
>
> return OPTIMIZE (evex_unaligned);
> @@ -84,7 +86,8 @@ IFUNC_SELECTOR (void)
>
> if (CPU_FEATURE_USABLE_P (cpu_features, RTM))
> {
> - if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> + if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)
> + || CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
> return OPTIMIZE (avx2_unaligned_erms_rtm);
>
> return OPTIMIZE (avx2_unaligned_rtm);
> @@ -93,14 +96,16 @@ IFUNC_SELECTOR (void)
> if (X86_ISA_CPU_FEATURES_ARCH_P (cpu_features,
> Prefer_No_VZEROUPPER, !))
> {
> - if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> + if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)
> + || CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
> return OPTIMIZE (avx2_unaligned_erms);
>
> return OPTIMIZE (avx2_unaligned);
> }
> }
>
> - if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> + if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)
> + || CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
> return OPTIMIZE (sse2_unaligned_erms);
>
> return OPTIMIZE (sse2_unaligned);
> --
> 2.34.1
>
On Mon, Aug 12, 2024 at 7:38 PM H.J. Lu <hjl.tools@gmail.com> wrote:
>
> On Sat, Aug 10, 2024 at 10:56 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> >
> > The goal of this flag is to allow targets which don't prefer/have ERMS
> > to still access the non-temporal memset implementation.
> > ---
> > sysdeps/x86/cpu-tunables.c | 2 ++
> > sysdeps/x86/dl-cacheinfo.h | 32 ++++++++++++++++---
> > ...cpu-features-preferred_feature_index_1.def | 1 +
> > sysdeps/x86/tst-hwcap-tunables.c | 6 ++--
> > sysdeps/x86_64/multiarch/ifunc-memset.h | 15 ++++++---
> > 5 files changed, 45 insertions(+), 11 deletions(-)
> >
> > diff --git a/sysdeps/x86/cpu-tunables.c b/sysdeps/x86/cpu-tunables.c
> > index ccc6b64dc2..789646ba26 100644
> > --- a/sysdeps/x86/cpu-tunables.c
> > +++ b/sysdeps/x86/cpu-tunables.c
> > @@ -223,6 +223,8 @@ TUNABLE_CALLBACK (set_hwcaps) (tunable_val_t *valp)
> > Fast_Unaligned_Load, 19);
> > CHECK_GLIBC_IFUNC_PREFERRED_BOTH (n, cpu_features,
> > Fast_Unaligned_Copy, 19);
> > + CHECK_GLIBC_IFUNC_PREFERRED_BOTH (n, cpu_features,
> > + Prefer_Non_Temporal, 19);
> > }
> > break;
> > case 20:
> > diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h
> > index a1c03b8903..b8ba0c098d 100644
> > --- a/sysdeps/x86/dl-cacheinfo.h
> > +++ b/sysdeps/x86/dl-cacheinfo.h
> > @@ -992,7 +992,8 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
> > non_temporal_threshold. Enable this for both Intel and AMD hardware. */
> > unsigned long int memset_non_temporal_threshold = SIZE_MAX;
> > if (!CPU_FEATURES_ARCH_P (cpu_features, Avoid_Non_Temporal_Memset)
> > - && (cpu_features->basic.kind == arch_kind_intel
> > + && (CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal)
> > + || cpu_features->basic.kind == arch_kind_intel
>
> Please set Prefer_Non_Temporal for Intel and AMD. There is no
> need to check arch_kind_intel nor arch_kind_amd.
We shouldn't do that. I think there's a bit of a misunderstanding.
Prefer_Non_Temporal will use non-temporal but *not erms*
which is not what we want on intel/amd.
Maybe I should rename the variable, as nothing about its
name implies that.
How about `Prefer_Non_Temporal_But_Not_ERMS` or is that too
cumbersome?
>
> > || cpu_features->basic.kind == arch_kind_amd))
> > memset_non_temporal_threshold = non_temporal_threshold;
> >
> > @@ -1042,14 +1043,37 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
> > slightly better than ERMS. */
> > rep_stosb_threshold = SIZE_MAX;
> >
> > + /*
> > + For memset, the non-temporal implementation is only accessed through the
> > + stosb code. ie:
> > + ```
> > + if (size >= rep_stosb_thresh)
> > + {
> > + if (size >= non_temporal_thresh)
> > + {
> > + do_non_temporal ();
> > + }
> > + do_stosb ();
> > + }
> > + do_normal_vec_loop ();
> > + ```
> > + So if we prefer non-temporal, set `rep_stosb_thresh = non_temporal_thresh`
> > + to enable the implementation. If `rep_stosb_thresh = non_temporal_thresh`,
> > + `rep stosb` will never be used.
> > + */
> > + TUNABLE_SET_WITH_BOUNDS (x86_memset_non_temporal_threshold,
> > + memset_non_temporal_threshold,
> > + minimum_non_temporal_threshold, SIZE_MAX);
> > + if (CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
> > + rep_stosb_threshold
> > + = TUNABLE_GET (x86_memset_non_temporal_threshold, long int, NULL);
> > +
> > +
> > TUNABLE_SET_WITH_BOUNDS (x86_data_cache_size, data, 0, SIZE_MAX);
> > TUNABLE_SET_WITH_BOUNDS (x86_shared_cache_size, shared, 0, SIZE_MAX);
> > TUNABLE_SET_WITH_BOUNDS (x86_non_temporal_threshold, non_temporal_threshold,
> > minimum_non_temporal_threshold,
> > maximum_non_temporal_threshold);
> > - TUNABLE_SET_WITH_BOUNDS (x86_memset_non_temporal_threshold,
> > - memset_non_temporal_threshold,
> > - minimum_non_temporal_threshold, SIZE_MAX);
> > TUNABLE_SET_WITH_BOUNDS (x86_rep_movsb_threshold, rep_movsb_threshold,
> > minimum_rep_movsb_threshold, SIZE_MAX);
> > TUNABLE_SET_WITH_BOUNDS (x86_rep_stosb_threshold, rep_stosb_threshold, 1,
> > diff --git a/sysdeps/x86/include/cpu-features-preferred_feature_index_1.def b/sysdeps/x86/include/cpu-features-preferred_feature_index_1.def
> > index 61bbbc2e89..f15344a8be 100644
> > --- a/sysdeps/x86/include/cpu-features-preferred_feature_index_1.def
> > +++ b/sysdeps/x86/include/cpu-features-preferred_feature_index_1.def
> > @@ -34,3 +34,4 @@ BIT (MathVec_Prefer_No_AVX512)
> > BIT (Prefer_FSRM)
> > BIT (Avoid_Short_Distance_REP_MOVSB)
> > BIT (Avoid_Non_Temporal_Memset)
> > +BIT (Prefer_Non_Temporal)
> > \ No newline at end of file
> > diff --git a/sysdeps/x86/tst-hwcap-tunables.c b/sysdeps/x86/tst-hwcap-tunables.c
> > index 94307283d7..b33e0e92f4 100644
> > --- a/sysdeps/x86/tst-hwcap-tunables.c
> > +++ b/sysdeps/x86/tst-hwcap-tunables.c
> > @@ -60,7 +60,8 @@ static const struct test_t
> > /* Disable everything. */
> > "-Prefer_ERMS,-Prefer_FSRM,-AVX,-AVX2,-AVX512F,-AVX512VL,"
> > "-SSE4_1,-SSE4_2,-SSSE3,-Fast_Unaligned_Load,-ERMS,"
> > - "-AVX_Fast_Unaligned_Load,-Avoid_Non_Temporal_Memset",
> > + "-AVX_Fast_Unaligned_Load,-Avoid_Non_Temporal_Memset,"
> > + "-Prefer_Non_Temporal",
> > test_1,
> > array_length (test_1)
> > },
> > @@ -68,7 +69,8 @@ static const struct test_t
> > /* Same as before, but with some empty suboptions. */
> > ",-,-Prefer_ERMS,-Prefer_FSRM,-AVX,-AVX2,-AVX512F,-AVX512VL,"
> > "-SSE4_1,-SSE4_2,-SSSE3,-Fast_Unaligned_Load,,-,"
> > - "-ERMS,-AVX_Fast_Unaligned_Load,-Avoid_Non_Temporal_Memset,-,",
> > + "-ERMS,-AVX_Fast_Unaligned_Load,-Avoid_Non_Temporal_Memset,"
> > + "-Prefer_Non_Temporal,-,",
> > test_1,
> > array_length (test_1)
> > }
> > diff --git a/sysdeps/x86_64/multiarch/ifunc-memset.h b/sysdeps/x86_64/multiarch/ifunc-memset.h
> > index 7a637ef7ca..27f04e9dce 100644
> > --- a/sysdeps/x86_64/multiarch/ifunc-memset.h
> > +++ b/sysdeps/x86_64/multiarch/ifunc-memset.h
> > @@ -61,7 +61,8 @@ IFUNC_SELECTOR (void)
> > && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> > && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, BMI2))
> > {
> > - if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> > + if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)
> > + || CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
> > return OPTIMIZE (avx512_unaligned_erms);
> >
> > return OPTIMIZE (avx512_unaligned);
> > @@ -76,7 +77,8 @@ IFUNC_SELECTOR (void)
> > && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> > && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, BMI2))
> > {
> > - if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> > + if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)
> > + || CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
> > return OPTIMIZE (evex_unaligned_erms);
> >
> > return OPTIMIZE (evex_unaligned);
> > @@ -84,7 +86,8 @@ IFUNC_SELECTOR (void)
> >
> > if (CPU_FEATURE_USABLE_P (cpu_features, RTM))
> > {
> > - if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> > + if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)
> > + || CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
> > return OPTIMIZE (avx2_unaligned_erms_rtm);
> >
> > return OPTIMIZE (avx2_unaligned_rtm);
> > @@ -93,14 +96,16 @@ IFUNC_SELECTOR (void)
> > if (X86_ISA_CPU_FEATURES_ARCH_P (cpu_features,
> > Prefer_No_VZEROUPPER, !))
> > {
> > - if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> > + if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)
> > + || CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
> > return OPTIMIZE (avx2_unaligned_erms);
> >
> > return OPTIMIZE (avx2_unaligned);
> > }
> > }
> >
> > - if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> > + if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)
> > + || CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
> > return OPTIMIZE (sse2_unaligned_erms);
> >
> > return OPTIMIZE (sse2_unaligned);
> > --
> > 2.34.1
> >
>
>
> --
> H.J.
On Mon, Aug 12, 2024 at 12:06 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
>
> On Mon, Aug 12, 2024 at 7:38 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> >
> > On Sat, Aug 10, 2024 at 10:56 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> > >
> > > The goal of this flag is to allow targets which don't prefer/have ERMS
> > > to still access the non-temporal memset implementation.
> > > ---
> > > sysdeps/x86/cpu-tunables.c | 2 ++
> > > sysdeps/x86/dl-cacheinfo.h | 32 ++++++++++++++++---
> > > ...cpu-features-preferred_feature_index_1.def | 1 +
> > > sysdeps/x86/tst-hwcap-tunables.c | 6 ++--
> > > sysdeps/x86_64/multiarch/ifunc-memset.h | 15 ++++++---
> > > 5 files changed, 45 insertions(+), 11 deletions(-)
> > >
> > > diff --git a/sysdeps/x86/cpu-tunables.c b/sysdeps/x86/cpu-tunables.c
> > > index ccc6b64dc2..789646ba26 100644
> > > --- a/sysdeps/x86/cpu-tunables.c
> > > +++ b/sysdeps/x86/cpu-tunables.c
> > > @@ -223,6 +223,8 @@ TUNABLE_CALLBACK (set_hwcaps) (tunable_val_t *valp)
> > > Fast_Unaligned_Load, 19);
> > > CHECK_GLIBC_IFUNC_PREFERRED_BOTH (n, cpu_features,
> > > Fast_Unaligned_Copy, 19);
> > > + CHECK_GLIBC_IFUNC_PREFERRED_BOTH (n, cpu_features,
> > > + Prefer_Non_Temporal, 19);
> > > }
> > > break;
> > > case 20:
> > > diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h
> > > index a1c03b8903..b8ba0c098d 100644
> > > --- a/sysdeps/x86/dl-cacheinfo.h
> > > +++ b/sysdeps/x86/dl-cacheinfo.h
> > > @@ -992,7 +992,8 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
> > > non_temporal_threshold. Enable this for both Intel and AMD hardware. */
> > > unsigned long int memset_non_temporal_threshold = SIZE_MAX;
> > > if (!CPU_FEATURES_ARCH_P (cpu_features, Avoid_Non_Temporal_Memset)
> > > - && (cpu_features->basic.kind == arch_kind_intel
> > > + && (CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal)
> > > + || cpu_features->basic.kind == arch_kind_intel
> >
> > Please set Prefer_Non_Temporal for Intel and AMD. There is no
> > need to check arch_kind_intel nor arch_kind_amd.
>
> We shouldn't do that. Think you a bit of a misunderstanding.
> Prefer_Non_Temporal will use non-temporal but *not erms*
> which is not what we want on intel/amd.
>
> Maybe I should rename the variable as I can see nothing about its
> name implies that.
>
> How about `Prefer_Non_Temporal_But_Not_ERMS` or is that too
> cumbersome?
Prefer_Non_Temporal_Without_ERMS? Please move
rep_stosb_threshold setting close to
tunable_size = TUNABLE_GET (x86_memset_non_temporal_threshold, long
int, NULL);
if (tunable_size > minimum_non_temporal_threshold
&& tunable_size <= maximum_non_temporal_threshold)
memset_non_temporal_threshold = tunable_size;
> >
> > > || cpu_features->basic.kind == arch_kind_amd))
> > > memset_non_temporal_threshold = non_temporal_threshold;
> > >
> > > @@ -1042,14 +1043,37 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
> > > slightly better than ERMS. */
> > > rep_stosb_threshold = SIZE_MAX;
> > >
> > > + /*
> > > + For memset, the non-temporal implementation is only accessed through the
> > > + stosb code. ie:
> > > + ```
> > > + if (size >= rep_stosb_thresh)
> > > + {
> > > + if (size >= non_temporal_thresh)
> > > + {
> > > + do_non_temporal ();
> > > + }
> > > + do_stosb ();
> > > + }
> > > + do_normal_vec_loop ();
> > > + ```
> > > + So if we prefer non-temporal, set `rep_stosb_thresh = non_temporal_thresh`
> > > + to enable the implementation. If `rep_stosb_thresh = non_temporal_thresh`,
> > > + `rep stosb` will never be used.
> > > + */
> > > + TUNABLE_SET_WITH_BOUNDS (x86_memset_non_temporal_threshold,
> > > + memset_non_temporal_threshold,
> > > + minimum_non_temporal_threshold, SIZE_MAX);
> > > + if (CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
> > > + rep_stosb_threshold
> > > + = TUNABLE_GET (x86_memset_non_temporal_threshold, long int, NULL);
> > > +
> > > +
> > > TUNABLE_SET_WITH_BOUNDS (x86_data_cache_size, data, 0, SIZE_MAX);
> > > TUNABLE_SET_WITH_BOUNDS (x86_shared_cache_size, shared, 0, SIZE_MAX);
> > > TUNABLE_SET_WITH_BOUNDS (x86_non_temporal_threshold, non_temporal_threshold,
> > > minimum_non_temporal_threshold,
> > > maximum_non_temporal_threshold);
> > > - TUNABLE_SET_WITH_BOUNDS (x86_memset_non_temporal_threshold,
> > > - memset_non_temporal_threshold,
> > > - minimum_non_temporal_threshold, SIZE_MAX);
> > > TUNABLE_SET_WITH_BOUNDS (x86_rep_movsb_threshold, rep_movsb_threshold,
> > > minimum_rep_movsb_threshold, SIZE_MAX);
> > > TUNABLE_SET_WITH_BOUNDS (x86_rep_stosb_threshold, rep_stosb_threshold, 1,
> > > diff --git a/sysdeps/x86/include/cpu-features-preferred_feature_index_1.def b/sysdeps/x86/include/cpu-features-preferred_feature_index_1.def
> > > index 61bbbc2e89..f15344a8be 100644
> > > --- a/sysdeps/x86/include/cpu-features-preferred_feature_index_1.def
> > > +++ b/sysdeps/x86/include/cpu-features-preferred_feature_index_1.def
> > > @@ -34,3 +34,4 @@ BIT (MathVec_Prefer_No_AVX512)
> > > BIT (Prefer_FSRM)
> > > BIT (Avoid_Short_Distance_REP_MOVSB)
> > > BIT (Avoid_Non_Temporal_Memset)
> > > +BIT (Prefer_Non_Temporal)
> > > \ No newline at end of file
> > > diff --git a/sysdeps/x86/tst-hwcap-tunables.c b/sysdeps/x86/tst-hwcap-tunables.c
> > > index 94307283d7..b33e0e92f4 100644
> > > --- a/sysdeps/x86/tst-hwcap-tunables.c
> > > +++ b/sysdeps/x86/tst-hwcap-tunables.c
> > > @@ -60,7 +60,8 @@ static const struct test_t
> > > /* Disable everything. */
> > > "-Prefer_ERMS,-Prefer_FSRM,-AVX,-AVX2,-AVX512F,-AVX512VL,"
> > > "-SSE4_1,-SSE4_2,-SSSE3,-Fast_Unaligned_Load,-ERMS,"
> > > - "-AVX_Fast_Unaligned_Load,-Avoid_Non_Temporal_Memset",
> > > + "-AVX_Fast_Unaligned_Load,-Avoid_Non_Temporal_Memset,"
> > > + "-Prefer_Non_Temporal",
> > > test_1,
> > > array_length (test_1)
> > > },
> > > @@ -68,7 +69,8 @@ static const struct test_t
> > > /* Same as before, but with some empty suboptions. */
> > > ",-,-Prefer_ERMS,-Prefer_FSRM,-AVX,-AVX2,-AVX512F,-AVX512VL,"
> > > "-SSE4_1,-SSE4_2,-SSSE3,-Fast_Unaligned_Load,,-,"
> > > - "-ERMS,-AVX_Fast_Unaligned_Load,-Avoid_Non_Temporal_Memset,-,",
> > > + "-ERMS,-AVX_Fast_Unaligned_Load,-Avoid_Non_Temporal_Memset,"
> > > + "-Prefer_Non_Temporal,-,",
> > > test_1,
> > > array_length (test_1)
> > > }
> > > diff --git a/sysdeps/x86_64/multiarch/ifunc-memset.h b/sysdeps/x86_64/multiarch/ifunc-memset.h
> > > index 7a637ef7ca..27f04e9dce 100644
> > > --- a/sysdeps/x86_64/multiarch/ifunc-memset.h
> > > +++ b/sysdeps/x86_64/multiarch/ifunc-memset.h
> > > @@ -61,7 +61,8 @@ IFUNC_SELECTOR (void)
> > > && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> > > && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, BMI2))
> > > {
> > > - if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> > > + if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)
> > > + || CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
> > > return OPTIMIZE (avx512_unaligned_erms);
> > >
> > > return OPTIMIZE (avx512_unaligned);
> > > @@ -76,7 +77,8 @@ IFUNC_SELECTOR (void)
> > > && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> > > && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, BMI2))
> > > {
> > > - if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> > > + if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)
> > > + || CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
> > > return OPTIMIZE (evex_unaligned_erms);
> > >
> > > return OPTIMIZE (evex_unaligned);
> > > @@ -84,7 +86,8 @@ IFUNC_SELECTOR (void)
> > >
> > > if (CPU_FEATURE_USABLE_P (cpu_features, RTM))
> > > {
> > > - if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> > > + if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)
> > > + || CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
> > > return OPTIMIZE (avx2_unaligned_erms_rtm);
> > >
> > > return OPTIMIZE (avx2_unaligned_rtm);
> > > @@ -93,14 +96,16 @@ IFUNC_SELECTOR (void)
> > > if (X86_ISA_CPU_FEATURES_ARCH_P (cpu_features,
> > > Prefer_No_VZEROUPPER, !))
> > > {
> > > - if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> > > + if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)
> > > + || CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
> > > return OPTIMIZE (avx2_unaligned_erms);
> > >
> > > return OPTIMIZE (avx2_unaligned);
> > > }
> > > }
> > >
> > > - if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> > > + if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)
> > > + || CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
> > > return OPTIMIZE (sse2_unaligned_erms);
> > >
> > > return OPTIMIZE (sse2_unaligned);
> > > --
> > > 2.34.1
> > >
> >
> >
> > --
> > H.J.
On Mon, Aug 12, 2024 at 12:45 PM H.J. Lu <hjl.tools@gmail.com> wrote:
>
> On Mon, Aug 12, 2024 at 12:06 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> >
> > On Mon, Aug 12, 2024 at 7:38 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> > >
> > > On Sat, Aug 10, 2024 at 10:56 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> > > >
> > > > The goal of this flag is to allow targets which don't prefer/have ERMS
> > > > to still access the non-temporal memset implementation.
> > > > ---
> > > > sysdeps/x86/cpu-tunables.c | 2 ++
> > > > sysdeps/x86/dl-cacheinfo.h | 32 ++++++++++++++++---
> > > > ...cpu-features-preferred_feature_index_1.def | 1 +
> > > > sysdeps/x86/tst-hwcap-tunables.c | 6 ++--
> > > > sysdeps/x86_64/multiarch/ifunc-memset.h | 15 ++++++---
> > > > 5 files changed, 45 insertions(+), 11 deletions(-)
> > > >
> > > > diff --git a/sysdeps/x86/cpu-tunables.c b/sysdeps/x86/cpu-tunables.c
> > > > index ccc6b64dc2..789646ba26 100644
> > > > --- a/sysdeps/x86/cpu-tunables.c
> > > > +++ b/sysdeps/x86/cpu-tunables.c
> > > > @@ -223,6 +223,8 @@ TUNABLE_CALLBACK (set_hwcaps) (tunable_val_t *valp)
> > > > Fast_Unaligned_Load, 19);
> > > > CHECK_GLIBC_IFUNC_PREFERRED_BOTH (n, cpu_features,
> > > > Fast_Unaligned_Copy, 19);
> > > > + CHECK_GLIBC_IFUNC_PREFERRED_BOTH (n, cpu_features,
> > > > + Prefer_Non_Temporal, 19);
> > > > }
> > > > break;
> > > > case 20:
> > > > diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h
> > > > index a1c03b8903..b8ba0c098d 100644
> > > > --- a/sysdeps/x86/dl-cacheinfo.h
> > > > +++ b/sysdeps/x86/dl-cacheinfo.h
> > > > @@ -992,7 +992,8 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
> > > > non_temporal_threshold. Enable this for both Intel and AMD hardware. */
> > > > unsigned long int memset_non_temporal_threshold = SIZE_MAX;
> > > > if (!CPU_FEATURES_ARCH_P (cpu_features, Avoid_Non_Temporal_Memset)
> > > > - && (cpu_features->basic.kind == arch_kind_intel
> > > > + && (CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal)
> > > > + || cpu_features->basic.kind == arch_kind_intel
> > >
> > > Please set Prefer_Non_Temporal for Intel and AMD. There is no
> > > need to check arch_kind_intel nor arch_kind_amd.
> >
> > We shouldn't do that. Think you a bit of a misunderstanding.
> > Prefer_Non_Temporal will use non-temporal but *not erms*
> > which is not what we want on intel/amd.
> >
> > Maybe I should rename the variable as I can see nothing about its
> > name implies that.
> >
> > How about `Prefer_Non_Temporal_But_Not_ERMS` or is that too
> > cumbersome?
>
> Prefer_Non_Temporal_Without_ERMS? Please move
> rep_stosb_threshold setting close to
>
> tunable_size = TUNABLE_GET (x86_memset_non_temporal_threshold, long
> int, NULL);
> if (tunable_size > minimum_non_temporal_threshold
> && tunable_size <= maximum_non_temporal_threshold)
> memset_non_temporal_threshold = tunable_size;
>
How about something like this?
On Tue, Aug 13, 2024 at 9:08 PM H.J. Lu <hjl.tools@gmail.com> wrote:
>
> On Mon, Aug 12, 2024 at 12:45 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> >
> > On Mon, Aug 12, 2024 at 12:06 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> > >
> > > On Mon, Aug 12, 2024 at 7:38 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> > > >
> > > > On Sat, Aug 10, 2024 at 10:56 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> > > > >
> > > > > The goal of this flag is to allow targets which don't prefer/have ERMS
> > > > > to still access the non-temporal memset implementation.
> > > > > ---
> > > > > sysdeps/x86/cpu-tunables.c | 2 ++
> > > > > sysdeps/x86/dl-cacheinfo.h | 32 ++++++++++++++++---
> > > > > ...cpu-features-preferred_feature_index_1.def | 1 +
> > > > > sysdeps/x86/tst-hwcap-tunables.c | 6 ++--
> > > > > sysdeps/x86_64/multiarch/ifunc-memset.h | 15 ++++++---
> > > > > 5 files changed, 45 insertions(+), 11 deletions(-)
> > > > >
> > > > > diff --git a/sysdeps/x86/cpu-tunables.c b/sysdeps/x86/cpu-tunables.c
> > > > > index ccc6b64dc2..789646ba26 100644
> > > > > --- a/sysdeps/x86/cpu-tunables.c
> > > > > +++ b/sysdeps/x86/cpu-tunables.c
> > > > > @@ -223,6 +223,8 @@ TUNABLE_CALLBACK (set_hwcaps) (tunable_val_t *valp)
> > > > > Fast_Unaligned_Load, 19);
> > > > > CHECK_GLIBC_IFUNC_PREFERRED_BOTH (n, cpu_features,
> > > > > Fast_Unaligned_Copy, 19);
> > > > > + CHECK_GLIBC_IFUNC_PREFERRED_BOTH (n, cpu_features,
> > > > > + Prefer_Non_Temporal, 19);
> > > > > }
> > > > > break;
> > > > > case 20:
> > > > > diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h
> > > > > index a1c03b8903..b8ba0c098d 100644
> > > > > --- a/sysdeps/x86/dl-cacheinfo.h
> > > > > +++ b/sysdeps/x86/dl-cacheinfo.h
> > > > > @@ -992,7 +992,8 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
> > > > > non_temporal_threshold. Enable this for both Intel and AMD hardware. */
> > > > > unsigned long int memset_non_temporal_threshold = SIZE_MAX;
> > > > > if (!CPU_FEATURES_ARCH_P (cpu_features, Avoid_Non_Temporal_Memset)
> > > > > - && (cpu_features->basic.kind == arch_kind_intel
> > > > > + && (CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal)
> > > > > + || cpu_features->basic.kind == arch_kind_intel
> > > >
> > > > Please set Prefer_Non_Temporal for Intel and AMD. There is no
> > > > need to check arch_kind_intel nor arch_kind_amd.
> > >
> > > We shouldn't do that. Think you a bit of a misunderstanding.
> > > Prefer_Non_Temporal will use non-temporal but *not erms*
> > > which is not what we want on intel/amd.
> > >
> > > Maybe I should rename the variable as I can see nothing about its
> > > name implies that.
> > >
> > > How about `Prefer_Non_Temporal_But_Not_ERMS` or is that too
> > > cumbersome?
> >
> > Prefer_Non_Temporal_Without_ERMS? Please move
> > rep_stosb_threshold setting close to
Sure, I thought about that but was worried it implies ERMS not being
available; still, I think it's clearer than what we have now.
> >
> > tunable_size = TUNABLE_GET (x86_memset_non_temporal_threshold, long
> > int, NULL);
> > if (tunable_size > minimum_non_temporal_threshold
> > && tunable_size <= maximum_non_temporal_threshold)
> > memset_non_temporal_threshold = tunable_size;
> >
>
> How about something like this?
IIUC that will only skip `rep stosb` if we have ERMS. Personally I'm not a fan
of that restriction. We may find cases where we don't want `rep stosb` at
all despite having ERMS (as may be the case for SKX).
>
> --
> H.J.
On Tue, Aug 13, 2024 at 6:25 AM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
>
> On Tue, Aug 13, 2024 at 9:08 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> >
> > On Mon, Aug 12, 2024 at 12:45 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> > >
> > > On Mon, Aug 12, 2024 at 12:06 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> > > >
> > > > On Mon, Aug 12, 2024 at 7:38 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> > > > >
> > > > > On Sat, Aug 10, 2024 at 10:56 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> > > > > >
> > > > > > The goal of this flag is to allow targets which don't prefer/have ERMS
> > > > > > to still access the non-temporal memset implementation.
> > > > > > ---
> > > > > > sysdeps/x86/cpu-tunables.c | 2 ++
> > > > > > sysdeps/x86/dl-cacheinfo.h | 32 ++++++++++++++++---
> > > > > > ...cpu-features-preferred_feature_index_1.def | 1 +
> > > > > > sysdeps/x86/tst-hwcap-tunables.c | 6 ++--
> > > > > > sysdeps/x86_64/multiarch/ifunc-memset.h | 15 ++++++---
> > > > > > 5 files changed, 45 insertions(+), 11 deletions(-)
> > > > > >
> > > > > > diff --git a/sysdeps/x86/cpu-tunables.c b/sysdeps/x86/cpu-tunables.c
> > > > > > index ccc6b64dc2..789646ba26 100644
> > > > > > --- a/sysdeps/x86/cpu-tunables.c
> > > > > > +++ b/sysdeps/x86/cpu-tunables.c
> > > > > > @@ -223,6 +223,8 @@ TUNABLE_CALLBACK (set_hwcaps) (tunable_val_t *valp)
> > > > > > Fast_Unaligned_Load, 19);
> > > > > > CHECK_GLIBC_IFUNC_PREFERRED_BOTH (n, cpu_features,
> > > > > > Fast_Unaligned_Copy, 19);
> > > > > > + CHECK_GLIBC_IFUNC_PREFERRED_BOTH (n, cpu_features,
> > > > > > + Prefer_Non_Temporal, 19);
> > > > > > }
> > > > > > break;
> > > > > > case 20:
> > > > > > diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h
> > > > > > index a1c03b8903..b8ba0c098d 100644
> > > > > > --- a/sysdeps/x86/dl-cacheinfo.h
> > > > > > +++ b/sysdeps/x86/dl-cacheinfo.h
> > > > > > @@ -992,7 +992,8 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
> > > > > > non_temporal_threshold. Enable this for both Intel and AMD hardware. */
> > > > > > unsigned long int memset_non_temporal_threshold = SIZE_MAX;
> > > > > > if (!CPU_FEATURES_ARCH_P (cpu_features, Avoid_Non_Temporal_Memset)
> > > > > > - && (cpu_features->basic.kind == arch_kind_intel
> > > > > > + && (CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal)
> > > > > > + || cpu_features->basic.kind == arch_kind_intel
> > > > >
> > > > > Please set Prefer_Non_Temporal for Intel and AMD. There is no
> > > > > need to check arch_kind_intel nor arch_kind_amd.
> > > >
> > > > We shouldn't do that. Think you a bit of a misunderstanding.
> > > > Prefer_Non_Temporal will use non-temporal but *not erms*
> > > > which is not what we want on intel/amd.
> > > >
> > > > Maybe I should rename the variable as I can see nothing about its
> > > > name implies that.
> > > >
> > > > How about `Prefer_Non_Temporal_But_Not_ERMS` or is that too
> > > > cumbersome?
> > >
> > > Prefer_Non_Temporal_Without_ERMS? Please move
> > > rep_stosb_threshold setting close to
>
> Sure, thought about that but was worried is implies ERMS not being
> available, but think its clearer than what we have now.
> > >
> > > tunable_size = TUNABLE_GET (x86_memset_non_temporal_threshold, long
> > > int, NULL);
> > > if (tunable_size > minimum_non_temporal_threshold
> > > && tunable_size <= maximum_non_temporal_threshold)
> > > memset_non_temporal_threshold = tunable_size;
> > >
> >
> > How about something like this?
>
> IIUC that will only skip `rep stosb` if we have ERMS. Personally not a fan
The code is
+ else if (CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal)
+ && !CPU_FEATURE_USABLE_P (cpu_features, ERMS))
+ rep_stosb_threshold = tunable_size;
It will skip `rep stosb` if we DON'T have ERMS.
> of that restrictions. We may find cases where we don't want `rep stosb` at
> all despite having ERMS (as may be the case for SKX).
We can make ERMS unusable in this case.
> >
> > --
> > H.J.
On Tue, Aug 13, 2024 at 9:29 PM H.J. Lu <hjl.tools@gmail.com> wrote:
>
> On Tue, Aug 13, 2024 at 6:25 AM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> >
> > On Tue, Aug 13, 2024 at 9:08 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> > >
> > > On Mon, Aug 12, 2024 at 12:45 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> > > >
> > > > On Mon, Aug 12, 2024 at 12:06 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> > > > >
> > > > > On Mon, Aug 12, 2024 at 7:38 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> > > > > >
> > > > > > On Sat, Aug 10, 2024 at 10:56 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> > > > > > >
> > > > > > > The goal of this flag is to allow targets which don't prefer/have ERMS
> > > > > > > to still access the non-temporal memset implementation.
> > > > > > > ---
> > > > > > > sysdeps/x86/cpu-tunables.c | 2 ++
> > > > > > > sysdeps/x86/dl-cacheinfo.h | 32 ++++++++++++++++---
> > > > > > > ...cpu-features-preferred_feature_index_1.def | 1 +
> > > > > > > sysdeps/x86/tst-hwcap-tunables.c | 6 ++--
> > > > > > > sysdeps/x86_64/multiarch/ifunc-memset.h | 15 ++++++---
> > > > > > > 5 files changed, 45 insertions(+), 11 deletions(-)
> > > > > > >
> > > > > > > diff --git a/sysdeps/x86/cpu-tunables.c b/sysdeps/x86/cpu-tunables.c
> > > > > > > index ccc6b64dc2..789646ba26 100644
> > > > > > > --- a/sysdeps/x86/cpu-tunables.c
> > > > > > > +++ b/sysdeps/x86/cpu-tunables.c
> > > > > > > @@ -223,6 +223,8 @@ TUNABLE_CALLBACK (set_hwcaps) (tunable_val_t *valp)
> > > > > > > Fast_Unaligned_Load, 19);
> > > > > > > CHECK_GLIBC_IFUNC_PREFERRED_BOTH (n, cpu_features,
> > > > > > > Fast_Unaligned_Copy, 19);
> > > > > > > + CHECK_GLIBC_IFUNC_PREFERRED_BOTH (n, cpu_features,
> > > > > > > + Prefer_Non_Temporal, 19);
> > > > > > > }
> > > > > > > break;
> > > > > > > case 20:
> > > > > > > diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h
> > > > > > > index a1c03b8903..b8ba0c098d 100644
> > > > > > > --- a/sysdeps/x86/dl-cacheinfo.h
> > > > > > > +++ b/sysdeps/x86/dl-cacheinfo.h
> > > > > > > @@ -992,7 +992,8 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
> > > > > > > non_temporal_threshold. Enable this for both Intel and AMD hardware. */
> > > > > > > unsigned long int memset_non_temporal_threshold = SIZE_MAX;
> > > > > > > if (!CPU_FEATURES_ARCH_P (cpu_features, Avoid_Non_Temporal_Memset)
> > > > > > > - && (cpu_features->basic.kind == arch_kind_intel
> > > > > > > + && (CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal)
> > > > > > > + || cpu_features->basic.kind == arch_kind_intel
> > > > > >
> > > > > > Please set Prefer_Non_Temporal for Intel and AMD. There is no
> > > > > > need to check arch_kind_intel nor arch_kind_amd.
> > > > >
> > > > > We shouldn't do that. Think you a bit of a misunderstanding.
> > > > > Prefer_Non_Temporal will use non-temporal but *not erms*
> > > > > which is not what we want on intel/amd.
> > > > >
> > > > > Maybe I should rename the variable as I can see nothing about its
> > > > > name implies that.
> > > > >
> > > > > How about `Prefer_Non_Temporal_But_Not_ERMS` or is that too
> > > > > cumbersome?
> > > >
> > > > Prefer_Non_Temporal_Without_ERMS? Please move
> > > > rep_stosb_threshold setting close to
> >
> > Sure, thought about that but was worried is implies ERMS not being
> > available, but think its clearer than what we have now.
> > > >
> > > > tunable_size = TUNABLE_GET (x86_memset_non_temporal_threshold, long
> > > > int, NULL);
> > > > if (tunable_size > minimum_non_temporal_threshold
> > > > && tunable_size <= maximum_non_temporal_threshold)
> > > > memset_non_temporal_threshold = tunable_size;
> > > >
> > >
> > > How about something like this?
> >
> > IIUC that will only skip `rep stosb` if we have ERMS. Personally not a fan
>
> The code is
>
> + else if (CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal)
> + && !CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> + rep_stosb_threshold = tunable_size;
>
> It will skip `rep stosb` if we DON'T have ERMS.
Yes, but it will only set the `rep stosb` threshold if we don't have ERMS,
so it requires non-ERMS to use the `rep stosb` code as the staging point for
non-temporal, which is the point of this patch.
>
> > of that restrictions. We may find cases where we don't want `rep stosb` at
> > all despite having ERMS (as may be the case for SKX).
>
> We can make ERMS unsable in this case.
That would work, although I don't think it's easier to start juggling
capabilities.
I don't think we should start disabling the usability of features to
meet preferences.
>
> > >
> > > --
> > > H.J.
>
>
>
> --
> H.J.
On Tue, Aug 13, 2024 at 6:35 AM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
>
> On Tue, Aug 13, 2024 at 9:29 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> >
> > On Tue, Aug 13, 2024 at 6:25 AM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> > >
> > > On Tue, Aug 13, 2024 at 9:08 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> > > >
> > > > On Mon, Aug 12, 2024 at 12:45 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> > > > >
> > > > > On Mon, Aug 12, 2024 at 12:06 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> > > > > >
> > > > > > On Mon, Aug 12, 2024 at 7:38 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> > > > > > >
> > > > > > > On Sat, Aug 10, 2024 at 10:56 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> > > > > > > >
> > > > > > > > The goal of this flag is to allow targets which don't prefer/have ERMS
> > > > > > > > to still access the non-temporal memset implementation.
> > > > > > > > ---
> > > > > > > > sysdeps/x86/cpu-tunables.c | 2 ++
> > > > > > > > sysdeps/x86/dl-cacheinfo.h | 32 ++++++++++++++++---
> > > > > > > > ...cpu-features-preferred_feature_index_1.def | 1 +
> > > > > > > > sysdeps/x86/tst-hwcap-tunables.c | 6 ++--
> > > > > > > > sysdeps/x86_64/multiarch/ifunc-memset.h | 15 ++++++---
> > > > > > > > 5 files changed, 45 insertions(+), 11 deletions(-)
> > > > > > > >
> > > > > > > > diff --git a/sysdeps/x86/cpu-tunables.c b/sysdeps/x86/cpu-tunables.c
> > > > > > > > index ccc6b64dc2..789646ba26 100644
> > > > > > > > --- a/sysdeps/x86/cpu-tunables.c
> > > > > > > > +++ b/sysdeps/x86/cpu-tunables.c
> > > > > > > > @@ -223,6 +223,8 @@ TUNABLE_CALLBACK (set_hwcaps) (tunable_val_t *valp)
> > > > > > > > Fast_Unaligned_Load, 19);
> > > > > > > > CHECK_GLIBC_IFUNC_PREFERRED_BOTH (n, cpu_features,
> > > > > > > > Fast_Unaligned_Copy, 19);
> > > > > > > > + CHECK_GLIBC_IFUNC_PREFERRED_BOTH (n, cpu_features,
> > > > > > > > + Prefer_Non_Temporal, 19);
> > > > > > > > }
> > > > > > > > break;
> > > > > > > > case 20:
> > > > > > > > diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h
> > > > > > > > index a1c03b8903..b8ba0c098d 100644
> > > > > > > > --- a/sysdeps/x86/dl-cacheinfo.h
> > > > > > > > +++ b/sysdeps/x86/dl-cacheinfo.h
> > > > > > > > @@ -992,7 +992,8 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
> > > > > > > > non_temporal_threshold. Enable this for both Intel and AMD hardware. */
> > > > > > > > unsigned long int memset_non_temporal_threshold = SIZE_MAX;
> > > > > > > > if (!CPU_FEATURES_ARCH_P (cpu_features, Avoid_Non_Temporal_Memset)
> > > > > > > > - && (cpu_features->basic.kind == arch_kind_intel
> > > > > > > > + && (CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal)
> > > > > > > > + || cpu_features->basic.kind == arch_kind_intel
> > > > > > >
> > > > > > > Please set Prefer_Non_Temporal for Intel and AMD. There is no
> > > > > > > need to check arch_kind_intel nor arch_kind_amd.
> > > > > >
> > > > > > We shouldn't do that. Think you a bit of a misunderstanding.
> > > > > > Prefer_Non_Temporal will use non-temporal but *not erms*
> > > > > > which is not what we want on intel/amd.
> > > > > >
> > > > > > Maybe I should rename the variable as I can see nothing about its
> > > > > > name implies that.
> > > > > >
> > > > > > How about `Prefer_Non_Temporal_But_Not_ERMS` or is that too
> > > > > > cumbersome?
> > > > >
> > > > > Prefer_Non_Temporal_Without_ERMS? Please move
> > > > > rep_stosb_threshold setting close to
> > >
> > > Sure, thought about that but was worried is implies ERMS not being
> > > available, but think its clearer than what we have now.
> > > > >
> > > > > tunable_size = TUNABLE_GET (x86_memset_non_temporal_threshold, long
> > > > > int, NULL);
> > > > > if (tunable_size > minimum_non_temporal_threshold
> > > > > && tunable_size <= maximum_non_temporal_threshold)
> > > > > memset_non_temporal_threshold = tunable_size;
> > > > >
> > > >
> > > > How about something like this?
> > >
> > > IIUC that will only skip `rep stosb` if we have ERMS. Personally not a fan
> >
> > The code is
> >
> > + else if (CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal)
> > + && !CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> > + rep_stosb_threshold = tunable_size;
> >
> > It will skip `rep stosb` if we DON'T have ERMS.
>
> Yes but it will only set the `rep stosb` threshold if we don't have ERMS
> so req non-erms to use the `rep stosb` codes as staging point for non-temporal
> which is the point of this patch.
>
> >
> > > of that restrictions. We may find cases where we don't want `rep stosb` at
> > > all despite having ERMS (as may be the case for SKX).
> >
> > We can make ERMS unsable in this case.
>
> That would work, although I don't think its easier to start juggling
> capabilities.
> I don't think we should start disabling the usability of features to
> meet preferences.
Each feature has a bit to indicate it is available, another bit to
indicate it is usable. If a feature should be avoided, we turn
off the usable bit. This is how it works.
> >
> > > >
> > > > --
> > > > H.J.
> >
> >
> >
> > --
> > H.J.
On Tue, Aug 13, 2024 at 9:53 PM H.J. Lu <hjl.tools@gmail.com> wrote:
>
> On Tue, Aug 13, 2024 at 6:35 AM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> >
> > On Tue, Aug 13, 2024 at 9:29 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> > >
> > > On Tue, Aug 13, 2024 at 6:25 AM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> > > >
> > > > On Tue, Aug 13, 2024 at 9:08 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> > > > >
> > > > > On Mon, Aug 12, 2024 at 12:45 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> > > > > >
> > > > > > On Mon, Aug 12, 2024 at 12:06 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> > > > > > >
> > > > > > > On Mon, Aug 12, 2024 at 7:38 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> > > > > > > >
> > > > > > > > On Sat, Aug 10, 2024 at 10:56 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> > > > > > > > >
> > > > > > > > > The goal of this flag is to allow targets which don't prefer/have ERMS
> > > > > > > > > to still access the non-temporal memset implementation.
> > > > > > > > > ---
> > > > > > > > > sysdeps/x86/cpu-tunables.c | 2 ++
> > > > > > > > > sysdeps/x86/dl-cacheinfo.h | 32 ++++++++++++++++---
> > > > > > > > > ...cpu-features-preferred_feature_index_1.def | 1 +
> > > > > > > > > sysdeps/x86/tst-hwcap-tunables.c | 6 ++--
> > > > > > > > > sysdeps/x86_64/multiarch/ifunc-memset.h | 15 ++++++---
> > > > > > > > > 5 files changed, 45 insertions(+), 11 deletions(-)
> > > > > > > > >
> > > > > > > > > diff --git a/sysdeps/x86/cpu-tunables.c b/sysdeps/x86/cpu-tunables.c
> > > > > > > > > index ccc6b64dc2..789646ba26 100644
> > > > > > > > > --- a/sysdeps/x86/cpu-tunables.c
> > > > > > > > > +++ b/sysdeps/x86/cpu-tunables.c
> > > > > > > > > @@ -223,6 +223,8 @@ TUNABLE_CALLBACK (set_hwcaps) (tunable_val_t *valp)
> > > > > > > > > Fast_Unaligned_Load, 19);
> > > > > > > > > CHECK_GLIBC_IFUNC_PREFERRED_BOTH (n, cpu_features,
> > > > > > > > > Fast_Unaligned_Copy, 19);
> > > > > > > > > + CHECK_GLIBC_IFUNC_PREFERRED_BOTH (n, cpu_features,
> > > > > > > > > + Prefer_Non_Temporal, 19);
> > > > > > > > > }
> > > > > > > > > break;
> > > > > > > > > case 20:
> > > > > > > > > diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h
> > > > > > > > > index a1c03b8903..b8ba0c098d 100644
> > > > > > > > > --- a/sysdeps/x86/dl-cacheinfo.h
> > > > > > > > > +++ b/sysdeps/x86/dl-cacheinfo.h
> > > > > > > > > @@ -992,7 +992,8 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
> > > > > > > > > non_temporal_threshold. Enable this for both Intel and AMD hardware. */
> > > > > > > > > unsigned long int memset_non_temporal_threshold = SIZE_MAX;
> > > > > > > > > if (!CPU_FEATURES_ARCH_P (cpu_features, Avoid_Non_Temporal_Memset)
> > > > > > > > > - && (cpu_features->basic.kind == arch_kind_intel
> > > > > > > > > + && (CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal)
> > > > > > > > > + || cpu_features->basic.kind == arch_kind_intel
> > > > > > > >
> > > > > > > > Please set Prefer_Non_Temporal for Intel and AMD. There is no
> > > > > > > > need to check arch_kind_intel nor arch_kind_amd.
> > > > > > >
> > > > > > > We shouldn't do that. Think you a bit of a misunderstanding.
> > > > > > > Prefer_Non_Temporal will use non-temporal but *not erms*
> > > > > > > which is not what we want on intel/amd.
> > > > > > >
> > > > > > > Maybe I should rename the variable as I can see nothing about its
> > > > > > > name implies that.
> > > > > > >
> > > > > > > How about `Prefer_Non_Temporal_But_Not_ERMS` or is that too
> > > > > > > cumbersome?
> > > > > >
> > > > > > Prefer_Non_Temporal_Without_ERMS? Please move
> > > > > > rep_stosb_threshold setting close to
> > > >
> > > > Sure, thought about that but was worried is implies ERMS not being
> > > > available, but think its clearer than what we have now.
> > > > > >
> > > > > > tunable_size = TUNABLE_GET (x86_memset_non_temporal_threshold, long
> > > > > > int, NULL);
> > > > > > if (tunable_size > minimum_non_temporal_threshold
> > > > > > && tunable_size <= maximum_non_temporal_threshold)
> > > > > > memset_non_temporal_threshold = tunable_size;
> > > > > >
> > > > >
> > > > > How about something like this?
> > > >
> > > > IIUC that will only skip `rep stosb` if we have ERMS. Personally not a fan
> > >
> > > The code is
> > >
> > > + else if (CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal)
> > > + && !CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> > > + rep_stosb_threshold = tunable_size;
> > >
> > > It will skip `rep stosb` if we DON'T have ERMS.
> >
> > Yes but it will only set the `rep stosb` threshold if we don't have ERMS
> > so req non-erms to use the `rep stosb` codes as staging point for non-temporal
> > which is the point of this patch.
> >
> > >
> > > > of that restrictions. We may find cases where we don't want `rep stosb` at
> > > > all despite having ERMS (as may be the case for SKX).
> > >
> > > We can make ERMS unsable in this case.
> >
> > That would work, although I don't think its easier to start juggling
> > capabilities.
> > I don't think we should start disabling the usability of features to
> > meet preferences.
>
> Each feature has a bit to indicate it is available, another bit to
> indicate it is usable. If a feature should be avoided, we turn
> off the usable bit. This is how it works.
Ah, good point. Although we would need a bit to distinguish between
stosb vs movsb then.
Let me post with rename + refactor so the code is closer. If it's still
unclear we can re-evaluate.
>
> > >
> > > > >
> > > > > --
> > > > > H.J.
> > >
> > >
> > >
> > > --
> > > H.J.
>
>
>
> --
> H.J.
@@ -223,6 +223,8 @@ TUNABLE_CALLBACK (set_hwcaps) (tunable_val_t *valp)
Fast_Unaligned_Load, 19);
CHECK_GLIBC_IFUNC_PREFERRED_BOTH (n, cpu_features,
Fast_Unaligned_Copy, 19);
+ CHECK_GLIBC_IFUNC_PREFERRED_BOTH (n, cpu_features,
+ Prefer_Non_Temporal, 19);
}
break;
case 20:
@@ -992,7 +992,8 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
non_temporal_threshold. Enable this for both Intel and AMD hardware. */
unsigned long int memset_non_temporal_threshold = SIZE_MAX;
if (!CPU_FEATURES_ARCH_P (cpu_features, Avoid_Non_Temporal_Memset)
- && (cpu_features->basic.kind == arch_kind_intel
+ && (CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal)
+ || cpu_features->basic.kind == arch_kind_intel
|| cpu_features->basic.kind == arch_kind_amd))
memset_non_temporal_threshold = non_temporal_threshold;
@@ -1042,14 +1043,37 @@ dl_init_cacheinfo (struct cpu_features *cpu_features)
slightly better than ERMS. */
rep_stosb_threshold = SIZE_MAX;
+ /*
+ For memset, the non-temporal implementation is only accessed through the
+ stosb code. ie:
+ ```
+ if (size >= rep_stosb_thresh)
+ {
+ if (size >= non_temporal_thresh)
+ {
+ do_non_temporal ();
+ }
+ do_stosb ();
+ }
+ do_normal_vec_loop ();
+ ```
+ So if we prefer non-temporal, set `rep_stosb_thresh = non_temporal_thresh`
+ to enable the implementation. If `rep_stosb_thresh = non_temporal_thresh`,
+ `rep stosb` will never be used.
+ */
+ TUNABLE_SET_WITH_BOUNDS (x86_memset_non_temporal_threshold,
+ memset_non_temporal_threshold,
+ minimum_non_temporal_threshold, SIZE_MAX);
+ if (CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
+ rep_stosb_threshold
+ = TUNABLE_GET (x86_memset_non_temporal_threshold, long int, NULL);
+
+
TUNABLE_SET_WITH_BOUNDS (x86_data_cache_size, data, 0, SIZE_MAX);
TUNABLE_SET_WITH_BOUNDS (x86_shared_cache_size, shared, 0, SIZE_MAX);
TUNABLE_SET_WITH_BOUNDS (x86_non_temporal_threshold, non_temporal_threshold,
minimum_non_temporal_threshold,
maximum_non_temporal_threshold);
- TUNABLE_SET_WITH_BOUNDS (x86_memset_non_temporal_threshold,
- memset_non_temporal_threshold,
- minimum_non_temporal_threshold, SIZE_MAX);
TUNABLE_SET_WITH_BOUNDS (x86_rep_movsb_threshold, rep_movsb_threshold,
minimum_rep_movsb_threshold, SIZE_MAX);
TUNABLE_SET_WITH_BOUNDS (x86_rep_stosb_threshold, rep_stosb_threshold, 1,
@@ -34,3 +34,4 @@ BIT (MathVec_Prefer_No_AVX512)
BIT (Prefer_FSRM)
BIT (Avoid_Short_Distance_REP_MOVSB)
BIT (Avoid_Non_Temporal_Memset)
+BIT (Prefer_Non_Temporal)
\ No newline at end of file
@@ -60,7 +60,8 @@ static const struct test_t
/* Disable everything. */
"-Prefer_ERMS,-Prefer_FSRM,-AVX,-AVX2,-AVX512F,-AVX512VL,"
"-SSE4_1,-SSE4_2,-SSSE3,-Fast_Unaligned_Load,-ERMS,"
- "-AVX_Fast_Unaligned_Load,-Avoid_Non_Temporal_Memset",
+ "-AVX_Fast_Unaligned_Load,-Avoid_Non_Temporal_Memset,"
+ "-Prefer_Non_Temporal",
test_1,
array_length (test_1)
},
@@ -68,7 +69,8 @@ static const struct test_t
/* Same as before, but with some empty suboptions. */
",-,-Prefer_ERMS,-Prefer_FSRM,-AVX,-AVX2,-AVX512F,-AVX512VL,"
"-SSE4_1,-SSE4_2,-SSSE3,-Fast_Unaligned_Load,,-,"
- "-ERMS,-AVX_Fast_Unaligned_Load,-Avoid_Non_Temporal_Memset,-,",
+ "-ERMS,-AVX_Fast_Unaligned_Load,-Avoid_Non_Temporal_Memset,"
+ "-Prefer_Non_Temporal,-,",
test_1,
array_length (test_1)
}
@@ -61,7 +61,8 @@ IFUNC_SELECTOR (void)
&& X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
&& X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, BMI2))
{
- if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
+ if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)
+ || CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
return OPTIMIZE (avx512_unaligned_erms);
return OPTIMIZE (avx512_unaligned);
@@ -76,7 +77,8 @@ IFUNC_SELECTOR (void)
&& X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
&& X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, BMI2))
{
- if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
+ if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)
+ || CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
return OPTIMIZE (evex_unaligned_erms);
return OPTIMIZE (evex_unaligned);
@@ -84,7 +86,8 @@ IFUNC_SELECTOR (void)
if (CPU_FEATURE_USABLE_P (cpu_features, RTM))
{
- if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
+ if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)
+ || CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
return OPTIMIZE (avx2_unaligned_erms_rtm);
return OPTIMIZE (avx2_unaligned_rtm);
@@ -93,14 +96,16 @@ IFUNC_SELECTOR (void)
if (X86_ISA_CPU_FEATURES_ARCH_P (cpu_features,
Prefer_No_VZEROUPPER, !))
{
- if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
+ if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)
+ || CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
return OPTIMIZE (avx2_unaligned_erms);
return OPTIMIZE (avx2_unaligned);
}
}
- if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
+ if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)
+ || CPU_FEATURES_ARCH_P (cpu_features, Prefer_Non_Temporal))
return OPTIMIZE (sse2_unaligned_erms);
return OPTIMIZE (sse2_unaligned);