aarch64: Optimize SVE cos & cosf

Message ID 20231004093750.48645-1-Joe.Ramsay@arm.com
State Committed
Commit 9180160e0898f23b06fbd88af9c3c840e791169a
Series aarch64: Optimize SVE cos & cosf

Checks

Context Check Description
redhat-pt-bot/TryBot-apply_patch success Patch applied to master at the time it was sent
redhat-pt-bot/TryBot-32bit success Build for i686
linaro-tcwg-bot/tcwg_glibc_build--master-aarch64 success Testing passed
linaro-tcwg-bot/tcwg_glibc_check--master-aarch64 success Testing passed
linaro-tcwg-bot/tcwg_glibc_build--master-arm success Testing passed
linaro-tcwg-bot/tcwg_glibc_check--master-arm success Testing passed

Commit Message

Joe Ramsay Oct. 4, 2023, 9:37 a.m. UTC
  Saves a mov by ensuring the return value does not need to be moved out of
the way before the special-case branch. Also change to use overloaded
intrinsics.
---
Thanks,
Joe
 sysdeps/aarch64/fpu/cos_sve.c  | 53 +++++++++++++++++-----------------
 sysdeps/aarch64/fpu/cosf_sve.c | 47 ++++++++++++++----------------
 2 files changed, 47 insertions(+), 53 deletions(-)
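
The two changes are independent. The saved mov comes from performing the
final multiply after the branch rather than before it: the fast path can
then write its result straight into the return register, which before the
branch is still occupied by x (needed as the first argument to
special_case). The switch to overloaded intrinsics is cosmetic: the SVE
ACLE allows most intrinsics to be spelled either with a full element-type
suffix or with a shorter overloaded name that infers the type from the
arguments, and both map to the same instruction. A minimal illustration,
not part of the patch (assumes an SVE-enabled compiler):

  #include <arm_sve.h>

  /* Fully typed spelling: the _n_f64 suffix encodes the operand types.  */
  svfloat64_t
  scale_typed (svbool_t pg, svfloat64_t x)
  {
    return svmul_n_f64_x (pg, x, 2.0);
  }

  /* Overloaded spelling: the types are inferred from x and 2.0.  */
  svfloat64_t
  scale_overloaded (svbool_t pg, svfloat64_t x)
  {
    return svmul_x (pg, x, 2.0);
  }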
  

Comments

Szabolcs Nagy Oct. 5, 2023, 3:58 p.m. UTC | #1
The 10/04/2023 10:37, Joe Ramsay wrote:
> Saves a mov by ensuring the return value does not need to be moved out of
> the way before the special-case branch. Also change to use overloaded
> intrinsics.

looks good. committed.


Patch

diff --git a/sysdeps/aarch64/fpu/cos_sve.c b/sysdeps/aarch64/fpu/cos_sve.c
index d7a9b134da..c804e52fc6 100644
--- a/sysdeps/aarch64/fpu/cos_sve.c
+++ b/sysdeps/aarch64/fpu/cos_sve.c
@@ -37,9 +37,9 @@  static const struct data
 #define RangeVal 0x4160000000000000 /* asuint64 (0x1p23).  */
 
 static svfloat64_t NOINLINE
-special_case (svfloat64_t x, svfloat64_t y, svbool_t out_of_bounds)
+special_case (svfloat64_t x, svfloat64_t y, svbool_t oob)
 {
-  return sv_call_f64 (cos, x, y, out_of_bounds);
+  return sv_call_f64 (cos, x, y, oob);
 }
 
 /* A fast SVE implementation of cos based on trigonometric
@@ -51,42 +51,41 @@  svfloat64_t SV_NAME_D1 (cos) (svfloat64_t x, const svbool_t pg)
 {
   const struct data *d = ptr_barrier (&data);
 
-  svfloat64_t r = svabs_f64_x (pg, x);
-  svbool_t out_of_bounds
-      = svcmpge_n_u64 (pg, svreinterpret_u64_f64 (r), RangeVal);
+  svfloat64_t r = svabs_x (pg, x);
+  svbool_t oob = svcmpge (pg, svreinterpret_u64 (r), RangeVal);
 
   /* Load some constants in quad-word chunks to minimise memory access.  */
   svbool_t ptrue = svptrue_b64 ();
-  svfloat64_t invpio2_and_pio2_1 = svld1rq_f64 (ptrue, &d->inv_pio2);
-  svfloat64_t pio2_23 = svld1rq_f64 (ptrue, &d->pio2_2);
+  svfloat64_t invpio2_and_pio2_1 = svld1rq (ptrue, &d->inv_pio2);
+  svfloat64_t pio2_23 = svld1rq (ptrue, &d->pio2_2);
 
   /* n = rint(|x|/(pi/2)).  */
-  svfloat64_t q = svmla_lane_f64 (sv_f64 (d->shift), r, invpio2_and_pio2_1, 0);
-  svfloat64_t n = svsub_n_f64_x (pg, q, d->shift);
+  svfloat64_t q = svmla_lane (sv_f64 (d->shift), r, invpio2_and_pio2_1, 0);
+  svfloat64_t n = svsub_x (pg, q, d->shift);
 
   /* r = |x| - n*(pi/2)  (range reduction into -pi/4 .. pi/4).  */
-  r = svmls_lane_f64 (r, n, invpio2_and_pio2_1, 1);
-  r = svmls_lane_f64 (r, n, pio2_23, 0);
-  r = svmls_lane_f64 (r, n, pio2_23, 1);
+  r = svmls_lane (r, n, invpio2_and_pio2_1, 1);
+  r = svmls_lane (r, n, pio2_23, 0);
+  r = svmls_lane (r, n, pio2_23, 1);
 
   /* cos(r) poly approx.  */
-  svfloat64_t r2 = svtsmul_f64 (r, svreinterpret_u64_f64 (q));
+  svfloat64_t r2 = svtsmul (r, svreinterpret_u64 (q));
   svfloat64_t y = sv_f64 (0.0);
-  y = svtmad_f64 (y, r2, 7);
-  y = svtmad_f64 (y, r2, 6);
-  y = svtmad_f64 (y, r2, 5);
-  y = svtmad_f64 (y, r2, 4);
-  y = svtmad_f64 (y, r2, 3);
-  y = svtmad_f64 (y, r2, 2);
-  y = svtmad_f64 (y, r2, 1);
-  y = svtmad_f64 (y, r2, 0);
+  y = svtmad (y, r2, 7);
+  y = svtmad (y, r2, 6);
+  y = svtmad (y, r2, 5);
+  y = svtmad (y, r2, 4);
+  y = svtmad (y, r2, 3);
+  y = svtmad (y, r2, 2);
+  y = svtmad (y, r2, 1);
+  y = svtmad (y, r2, 0);
 
   /* Final multiplicative factor: 1.0 or x depending on bit #0 of q.  */
-  svfloat64_t f = svtssel_f64 (r, svreinterpret_u64_f64 (q));
-  /* Apply factor.  */
-  y = svmul_f64_x (pg, f, y);
+  svfloat64_t f = svtssel (r, svreinterpret_u64 (q));
+
+  if (__glibc_unlikely (svptest_any (pg, oob)))
+    return special_case (x, svmul_x (svnot_z (pg, oob), y, f), oob);
 
-  if (__glibc_unlikely (svptest_any (pg, out_of_bounds)))
-    return special_case (x, y, out_of_bounds);
-  return y;
+  /* Apply factor.  */
+  return svmul_x (pg, f, y);
 }
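
Note the restructured tail: on the slow path the polynomial result is
multiplied under svnot_z (pg, oob), i.e. only for the lanes that are in
bounds, since sv_call_f64 overwrites the flagged lanes with scalar cos
anyway; on the fast path the multiply is the final operation. A small
sketch of the predicate manipulation used here (illustrative helper, not
from the patch):

  #include <arm_sve.h>

  /* Multiply f * y only in lanes that are active in pg and not flagged
     out-of-bounds; flagged lanes are left as don't-care values, which is
     fine because the scalar fallback overwrites them.  */
  static svfloat64_t
  mul_in_bounds (svbool_t pg, svbool_t oob, svfloat64_t f, svfloat64_t y)
  {
    svbool_t in_bounds = svnot_z (pg, oob); /* active in pg, clear in oob */
    return svmul_x (in_bounds, f, y);
  }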
diff --git a/sysdeps/aarch64/fpu/cosf_sve.c b/sysdeps/aarch64/fpu/cosf_sve.c
index 577cbd864e..a0be56ec7e 100644
--- a/sysdeps/aarch64/fpu/cosf_sve.c
+++ b/sysdeps/aarch64/fpu/cosf_sve.c
@@ -37,9 +37,9 @@  static const struct data
 #define RangeVal 0x49800000 /* asuint32(0x1p20f).  */
 
 static svfloat32_t NOINLINE
-special_case (svfloat32_t x, svfloat32_t y, svbool_t out_of_bounds)
+special_case (svfloat32_t x, svfloat32_t y, svbool_t oob)
 {
-  return sv_call_f32 (cosf, x, y, out_of_bounds);
+  return sv_call_f32 (cosf, x, y, oob);
 }
 
 /* A fast SVE implementation of cosf based on trigonometric
@@ -51,40 +51,35 @@  svfloat32_t SV_NAME_F1 (cos) (svfloat32_t x, const svbool_t pg)
 {
   const struct data *d = ptr_barrier (&data);
 
-  svfloat32_t r = svabs_f32_x (pg, x);
-  svbool_t out_of_bounds
-    = svcmpge_n_u32 (pg, svreinterpret_u32_f32 (r), RangeVal);
+  svfloat32_t r = svabs_x (pg, x);
+  svbool_t oob = svcmpge (pg, svreinterpret_u32 (r), RangeVal);
 
   /* Load some constants in quad-word chunks to minimise memory access.  */
-  svfloat32_t negpio2_and_invpio2
-      = svld1rq_f32 (svptrue_b32 (), &d->neg_pio2_1);
+  svfloat32_t negpio2_and_invpio2 = svld1rq (svptrue_b32 (), &d->neg_pio2_1);
 
   /* n = rint(|x|/(pi/2)).  */
-  svfloat32_t q
-      = svmla_lane_f32 (sv_f32 (d->shift), r, negpio2_and_invpio2, 3);
-  svfloat32_t n = svsub_n_f32_x (pg, q, d->shift);
+  svfloat32_t q = svmla_lane (sv_f32 (d->shift), r, negpio2_and_invpio2, 3);
+  svfloat32_t n = svsub_x (pg, q, d->shift);
 
   /* r = |x| - n*(pi/2)  (range reduction into -pi/4 .. pi/4).  */
-  r = svmla_lane_f32 (r, n, negpio2_and_invpio2, 0);
-  r = svmla_lane_f32 (r, n, negpio2_and_invpio2, 1);
-  r = svmla_lane_f32 (r, n, negpio2_and_invpio2, 2);
+  r = svmla_lane (r, n, negpio2_and_invpio2, 0);
+  r = svmla_lane (r, n, negpio2_and_invpio2, 1);
+  r = svmla_lane (r, n, negpio2_and_invpio2, 2);
 
   /* Final multiplicative factor: 1.0 or x depending on bit #0 of q.  */
-  svfloat32_t f = svtssel_f32 (r, svreinterpret_u32_f32 (q));
+  svfloat32_t f = svtssel (r, svreinterpret_u32 (q));
 
   /* cos(r) poly approx.  */
-  svfloat32_t r2 = svtsmul_f32 (r, svreinterpret_u32_f32 (q));
+  svfloat32_t r2 = svtsmul (r, svreinterpret_u32 (q));
   svfloat32_t y = sv_f32 (0.0f);
-  y = svtmad_f32 (y, r2, 4);
-  y = svtmad_f32 (y, r2, 3);
-  y = svtmad_f32 (y, r2, 2);
-  y = svtmad_f32 (y, r2, 1);
-  y = svtmad_f32 (y, r2, 0);
-
+  y = svtmad (y, r2, 4);
+  y = svtmad (y, r2, 3);
+  y = svtmad (y, r2, 2);
+  y = svtmad (y, r2, 1);
+  y = svtmad (y, r2, 0);
+
+  if (__glibc_unlikely (svptest_any (pg, oob)))
+    return special_case (x, svmul_x (svnot_z (pg, oob), f, y), oob);
   /* Apply factor.  */
-  y = svmul_f32_x (pg, f, y);
-
-  if (__glibc_unlikely (svptest_any (pg, out_of_bounds)))
-    return special_case (x, y, out_of_bounds);
-  return y;
+  return svmul_x (pg, f, y);
 }
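
For reference, sv_call_f64 and sv_call_f32 are glibc-internal helpers
(sysdeps/aarch64/fpu/sv_math.h) that apply the scalar routine to each lane
flagged by the predicate and keep the vector result everywhere else. A
rough sketch of that contract, an approximation rather than the exact
glibc code:

  #include <arm_sve.h>

  /* Apply scalar f to each lane of x flagged by cmp; keep y elsewhere.  */
  static svfloat64_t
  call_lanewise (double (*f) (double), svfloat64_t x, svfloat64_t y,
                 svbool_t cmp)
  {
    /* Predicate with only the first flagged lane set.  */
    svbool_t p = svpfirst (cmp, svpfalse ());
    while (svptest_any (cmp, p))
      {
        /* Pull out the flagged element, fix it up with the scalar
           routine, and merge it back into the result vector.  */
        double elem = svclastb (p, 0.0, x);
        y = svsel (p, svdup_n_f64 (f (elem)), y);
        /* Step to the next flagged lane.  */
        p = svpnext_b64 (cmp, p);
      }
    return y;
  }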