[4/8] aarch64/fpu: Add vector variants of asinh

Message ID 20240220165945.21585-4-Joe.Ramsay@arm.com
State Committed
Commit 81406ea3c5b5ad19e307302c13dd642785b47948
Delegated to: Maxim Kuvyrkov
Series [1/8] aarch64/fpu: Add vector variants of erf

Checks

Context                                           Check    Description
redhat-pt-bot/TryBot-apply_patch                  success  Patch applied to master at the time it was sent
linaro-tcwg-bot/tcwg_glibc_build--master-aarch64  success  Testing passed
linaro-tcwg-bot/tcwg_glibc_check--master-aarch64  success  Testing passed
linaro-tcwg-bot/tcwg_glibc_build--master-arm      success  Testing passed
linaro-tcwg-bot/tcwg_glibc_check--master-arm      success  Testing passed

Commit Message

Joe Ramsay Feb. 20, 2024, 4:59 p.m. UTC
  ---
Thanks,
Joe
 sysdeps/aarch64/fpu/Makefile                  |   1 +
 sysdeps/aarch64/fpu/Versions                  |   5 +
 sysdeps/aarch64/fpu/advsimd_f32_protos.h      |   1 +
 sysdeps/aarch64/fpu/asinh_advsimd.c           | 171 ++++++++++++++++++
 sysdeps/aarch64/fpu/asinh_sve.c               | 150 +++++++++++++++
 sysdeps/aarch64/fpu/asinhf_advsimd.c          |  80 ++++++++
 sysdeps/aarch64/fpu/asinhf_sve.c              |  56 ++++++
 sysdeps/aarch64/fpu/bits/math-vector.h        |   8 +
 .../fpu/test-double-advsimd-wrappers.c        |   1 +
 .../aarch64/fpu/test-double-sve-wrappers.c    |   1 +
 .../aarch64/fpu/test-float-advsimd-wrappers.c |   1 +
 sysdeps/aarch64/fpu/test-float-sve-wrappers.c |   1 +
 sysdeps/aarch64/libm-test-ulps                |   8 +
 .../unix/sysv/linux/aarch64/libmvec.abilist   |   5 +
 14 files changed, 489 insertions(+)
 create mode 100644 sysdeps/aarch64/fpu/asinh_advsimd.c
 create mode 100644 sysdeps/aarch64/fpu/asinh_sve.c
 create mode 100644 sysdeps/aarch64/fpu/asinhf_advsimd.c
 create mode 100644 sysdeps/aarch64/fpu/asinhf_sve.c
  

Comments

Szabolcs Nagy March 21, 2024, 2:44 p.m. UTC | #1
The 02/20/2024 16:59, Joe Ramsay wrote:
> ---

OK.

Reviewed-by: Szabolcs Nagy <szabolcs.nagy@arm.com>

> Thanks,
> Joe
>  sysdeps/aarch64/fpu/Makefile                  |   1 +
>  sysdeps/aarch64/fpu/Versions                  |   5 +
>  sysdeps/aarch64/fpu/advsimd_f32_protos.h      |   1 +
>  sysdeps/aarch64/fpu/asinh_advsimd.c           | 171 ++++++++++++++++++
>  sysdeps/aarch64/fpu/asinh_sve.c               | 150 +++++++++++++++
>  sysdeps/aarch64/fpu/asinhf_advsimd.c          |  80 ++++++++
>  sysdeps/aarch64/fpu/asinhf_sve.c              |  56 ++++++
>  sysdeps/aarch64/fpu/bits/math-vector.h        |   8 +
>  .../fpu/test-double-advsimd-wrappers.c        |   1 +
>  .../aarch64/fpu/test-double-sve-wrappers.c    |   1 +
>  .../aarch64/fpu/test-float-advsimd-wrappers.c |   1 +
>  sysdeps/aarch64/fpu/test-float-sve-wrappers.c |   1 +
>  sysdeps/aarch64/libm-test-ulps                |   8 +
>  .../unix/sysv/linux/aarch64/libmvec.abilist   |   5 +
>  14 files changed, 489 insertions(+)
>  create mode 100644 sysdeps/aarch64/fpu/asinh_advsimd.c
>  create mode 100644 sysdeps/aarch64/fpu/asinh_sve.c
>  create mode 100644 sysdeps/aarch64/fpu/asinhf_advsimd.c
>  create mode 100644 sysdeps/aarch64/fpu/asinhf_sve.c
  

Patch

diff --git a/sysdeps/aarch64/fpu/Makefile b/sysdeps/aarch64/fpu/Makefile
index 2e5bbb5a07..d474f2969d 100644
--- a/sysdeps/aarch64/fpu/Makefile
+++ b/sysdeps/aarch64/fpu/Makefile
@@ -1,6 +1,7 @@ 
 libmvec-supported-funcs = acos \
                           acosh \
                           asin \
+                          asinh \
                           atan \
                           atan2 \
                           cos \
diff --git a/sysdeps/aarch64/fpu/Versions b/sysdeps/aarch64/fpu/Versions
index 60e1cdeace..08ea15efae 100644
--- a/sysdeps/aarch64/fpu/Versions
+++ b/sysdeps/aarch64/fpu/Versions
@@ -84,6 +84,11 @@  libmvec {
     _ZGVnN4v_acoshf;
     _ZGVsMxv_acosh;
     _ZGVsMxv_acoshf;
+    _ZGVnN2v_asinh;
+    _ZGVnN2v_asinhf;
+    _ZGVnN4v_asinhf;
+    _ZGVsMxv_asinh;
+    _ZGVsMxv_asinhf;
     _ZGVnN2v_cosh;
     _ZGVnN2v_coshf;
     _ZGVnN4v_coshf;
diff --git a/sysdeps/aarch64/fpu/advsimd_f32_protos.h b/sysdeps/aarch64/fpu/advsimd_f32_protos.h
index 22fec4de77..1e80721c9f 100644
--- a/sysdeps/aarch64/fpu/advsimd_f32_protos.h
+++ b/sysdeps/aarch64/fpu/advsimd_f32_protos.h
@@ -20,6 +20,7 @@ 
 libmvec_hidden_proto (V_NAME_F1(acos));
 libmvec_hidden_proto (V_NAME_F1(acosh));
 libmvec_hidden_proto (V_NAME_F1(asin));
+libmvec_hidden_proto (V_NAME_F1(asinh));
 libmvec_hidden_proto (V_NAME_F1(atan));
 libmvec_hidden_proto (V_NAME_F1(cos));
 libmvec_hidden_proto (V_NAME_F1(cosh));
diff --git a/sysdeps/aarch64/fpu/asinh_advsimd.c b/sysdeps/aarch64/fpu/asinh_advsimd.c
new file mode 100644
index 0000000000..544a52f651
--- /dev/null
+++ b/sysdeps/aarch64/fpu/asinh_advsimd.c
@@ -0,0 +1,171 @@ 
+/* Double-precision vector (Advanced SIMD) asinh function
+
+   Copyright (C) 2024 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "v_math.h"
+#include "poly_advsimd_f64.h"
+
+#define A(i) v_f64 (__v_log_data.poly[i])
+#define N (1 << V_LOG_TABLE_BITS)
+
+const static struct data
+{
+  float64x2_t poly[18];
+  uint64x2_t off, huge_bound, abs_mask;
+  float64x2_t ln2, tiny_bound;
+} data = {
+  .off = V2 (0x3fe6900900000000),
+  .ln2 = V2 (0x1.62e42fefa39efp-1),
+  .huge_bound = V2 (0x5fe0000000000000),
+  .tiny_bound = V2 (0x1p-26),
+  .abs_mask = V2 (0x7fffffffffffffff),
+  /* Even terms of polynomial s.t. asinh(x) is approximated by
+     asinh(x) ~= x + x^3 * (C0 + C1 * x + C2 * x^2 + C3 * x^3 + ...).
+     Generated using Remez, f = (asinh(sqrt(x)) - sqrt(x))/x^(3/2).  */
+  .poly = { V2 (-0x1.55555555554a7p-3), V2 (0x1.3333333326c7p-4),
+	    V2 (-0x1.6db6db68332e6p-5), V2 (0x1.f1c71b26fb40dp-6),
+	    V2 (-0x1.6e8b8b654a621p-6), V2 (0x1.1c4daa9e67871p-6),
+	    V2 (-0x1.c9871d10885afp-7), V2 (0x1.7a16e8d9d2ecfp-7),
+	    V2 (-0x1.3ddca533e9f54p-7), V2 (0x1.0becef748dafcp-7),
+	    V2 (-0x1.b90c7099dd397p-8), V2 (0x1.541f2bb1ffe51p-8),
+	    V2 (-0x1.d217026a669ecp-9), V2 (0x1.0b5c7977aaf7p-9),
+	    V2 (-0x1.e0f37daef9127p-11), V2 (0x1.388b5fe542a6p-12),
+	    V2 (-0x1.021a48685e287p-14), V2 (0x1.93d4ba83d34dap-18) },
+};
+
+static float64x2_t NOINLINE VPCS_ATTR
+special_case (float64x2_t x, float64x2_t y, uint64x2_t special)
+{
+  return v_call_f64 (asinh, x, y, special);
+}
+
+struct entry
+{
+  float64x2_t invc;
+  float64x2_t logc;
+};
+
+static inline struct entry
+lookup (uint64x2_t i)
+{
+  float64x2_t e0 = vld1q_f64 (
+      &__v_log_data.table[(i[0] >> (52 - V_LOG_TABLE_BITS)) & (N - 1)].invc);
+  float64x2_t e1 = vld1q_f64 (
+      &__v_log_data.table[(i[1] >> (52 - V_LOG_TABLE_BITS)) & (N - 1)].invc);
+  return (struct entry){ vuzp1q_f64 (e0, e1), vuzp2q_f64 (e0, e1) };
+}
+
+static inline float64x2_t
+log_inline (float64x2_t x, const struct data *d)
+{
+  /* Double-precision vector log, copied from ordinary vector log with some
+     cosmetic modification and special-cases removed.  */
+  uint64x2_t ix = vreinterpretq_u64_f64 (x);
+  uint64x2_t tmp = vsubq_u64 (ix, d->off);
+  int64x2_t k = vshrq_n_s64 (vreinterpretq_s64_u64 (tmp), 52);
+  uint64x2_t iz
+      = vsubq_u64 (ix, vandq_u64 (tmp, vdupq_n_u64 (0xfffULL << 52)));
+  float64x2_t z = vreinterpretq_f64_u64 (iz);
+  struct entry e = lookup (tmp);
+  float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, e.invc);
+  float64x2_t kd = vcvtq_f64_s64 (k);
+  float64x2_t hi = vfmaq_f64 (vaddq_f64 (e.logc, r), kd, d->ln2);
+  float64x2_t r2 = vmulq_f64 (r, r);
+  float64x2_t y = vfmaq_f64 (A (2), A (3), r);
+  float64x2_t p = vfmaq_f64 (A (0), A (1), r);
+  y = vfmaq_f64 (y, A (4), r2);
+  y = vfmaq_f64 (p, y, r2);
+  y = vfmaq_f64 (hi, y, r2);
+  return y;
+}
+
+/* Double-precision implementation of vector asinh(x).
+   asinh is very sensitive around 1, so it is impractical to devise a single
+   low-cost algorithm which is sufficiently accurate on a wide range of input.
+   Instead we use two different algorithms:
+   asinh(x) = sign(x) * log(|x| + sqrt(x^2 + 1))     if |x| >= 1
+	    = sign(x) * (|x| + |x|^3 * P(x^2))       otherwise
+   where log(x) is an optimized log approximation, and P(x) is a polynomial
+   shared with the scalar routine. The greatest observed error is 3.29 ULP, in
+   |x| >= 1:
+   __v_asinh(0x1.2cd9d717e2c9bp+0) got 0x1.ffffcfd0e234fp-1
+				  want 0x1.ffffcfd0e2352p-1.  */
+VPCS_ATTR float64x2_t V_NAME_D1 (asinh) (float64x2_t x)
+{
+  const struct data *d = ptr_barrier (&data);
+
+  float64x2_t ax = vabsq_f64 (x);
+  uint64x2_t iax = vreinterpretq_u64_f64 (ax);
+
+  uint64x2_t gt1 = vcgeq_f64 (ax, v_f64 (1));
+  uint64x2_t special = vcgeq_u64 (iax, d->huge_bound);
+
+#if WANT_SIMD_EXCEPT
+  uint64x2_t tiny = vcltq_f64 (ax, d->tiny_bound);
+  special = vorrq_u64 (special, tiny);
+#endif
+
+  /* Option 1: |x| >= 1.
+     Compute asinh(x) according to asinh(x) = log(x + sqrt(x^2 + 1)).
+     If WANT_SIMD_EXCEPT is enabled, sidestep special values, which will
+     overflow, by setting special lanes to 1. These will be fixed later.  */
+  float64x2_t option_1 = v_f64 (0);
+  if (__glibc_likely (v_any_u64 (gt1)))
+    {
+#if WANT_SIMD_EXCEPT
+      float64x2_t xm = v_zerofy_f64 (ax, special);
+#else
+      float64x2_t xm = ax;
+#endif
+      option_1 = log_inline (
+	  vaddq_f64 (xm, vsqrtq_f64 (vfmaq_f64 (v_f64 (1), xm, xm))), d);
+    }
+
+  /* Option 2: |x| < 1.
+     Compute asinh(x) using a polynomial.
+     If WANT_SIMD_EXCEPT is enabled, sidestep special lanes, which will
+     overflow, and tiny lanes, which will underflow, by setting them to 0. They
+     will be fixed later, either by selecting x or falling back to the scalar
+     special-case. The largest observed error in this region is 1.47 ULPs:
+     __v_asinh(0x1.fdfcd00cc1e6ap-1) got 0x1.c1d6bf874019bp-1
+				    want 0x1.c1d6bf874019cp-1.  */
+  float64x2_t option_2 = v_f64 (0);
+  if (__glibc_likely (v_any_u64 (vceqzq_u64 (gt1))))
+    {
+#if WANT_SIMD_EXCEPT
+      ax = v_zerofy_f64 (ax, vorrq_u64 (tiny, gt1));
+#endif
+      float64x2_t x2 = vmulq_f64 (ax, ax), x3 = vmulq_f64 (ax, x2),
+		  z2 = vmulq_f64 (x2, x2), z4 = vmulq_f64 (z2, z2),
+		  z8 = vmulq_f64 (z4, z4), z16 = vmulq_f64 (z8, z8);
+      float64x2_t p = v_estrin_17_f64 (x2, z2, z4, z8, z16, d->poly);
+      option_2 = vfmaq_f64 (ax, p, x3);
+#if WANT_SIMD_EXCEPT
+      option_2 = vbslq_f64 (tiny, x, option_2);
+#endif
+    }
+
+  /* Choose the right option for each lane.  */
+  float64x2_t y = vbslq_f64 (gt1, option_1, option_2);
+  /* Copy sign.  */
+  y = vbslq_f64 (d->abs_mask, y, x);
+
+  if (__glibc_unlikely (v_any_u64 (special)))
+    return special_case (x, y, special);
+  return y;
+}
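
For orientation, here is the same two-regime split described in the comment above, written as plain scalar C. It is a sketch only: the polynomial is truncated to its first three coefficients and the scalar log is used, so it does not reproduce the accuracy of the vector routine.

#include <math.h>

/* Illustrative only: same branch structure as the vector routine above,
   with the 18-term polynomial truncated to its first three coefficients.  */
static double
asinh_sketch (double x)
{
  double ax = fabs (x);
  double y;
  if (ax >= 1.0)
    /* Option 1: asinh(|x|) = log(|x| + sqrt(x^2 + 1)).  */
    y = log (ax + sqrt (ax * ax + 1.0));
  else
    {
      /* Option 2: asinh(|x|) ~= |x| + |x|^3 * P(x^2), first terms of P only.  */
      double x2 = ax * ax;
      double p = -0x1.55555555554a7p-3
		 + x2 * (0x1.3333333326c7p-4 + x2 * -0x1.6db6db68332e6p-5);
      y = ax + ax * x2 * p;
    }
  /* asinh is odd, so the sign is simply copied back.  */
  return copysign (y, x);
}
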
diff --git a/sysdeps/aarch64/fpu/asinh_sve.c b/sysdeps/aarch64/fpu/asinh_sve.c
new file mode 100644
index 0000000000..28dc5c4587
--- /dev/null
+++ b/sysdeps/aarch64/fpu/asinh_sve.c
@@ -0,0 +1,150 @@ 
+/* Double-precision vector (SVE) asinh function
+
+   Copyright (C) 2024 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "sv_math.h"
+#include "poly_sve_f64.h"
+
+#define SignMask (0x8000000000000000)
+#define One (0x3ff0000000000000)
+#define Thres (0x5fe0000000000000) /* asuint64 (0x1p511).  */
+
+static const struct data
+{
+  double poly[18];
+  double ln2, p3, p1, p4, p0, p2;
+  uint64_t n;
+  uint64_t off;
+
+} data = {
+  /* Polynomial generated using Remez on [2^-26, 1].  */
+  .poly
+  = { -0x1.55555555554a7p-3, 0x1.3333333326c7p-4, -0x1.6db6db68332e6p-5,
+      0x1.f1c71b26fb40dp-6, -0x1.6e8b8b654a621p-6, 0x1.1c4daa9e67871p-6,
+      -0x1.c9871d10885afp-7, 0x1.7a16e8d9d2ecfp-7, -0x1.3ddca533e9f54p-7,
+      0x1.0becef748dafcp-7, -0x1.b90c7099dd397p-8, 0x1.541f2bb1ffe51p-8,
+      -0x1.d217026a669ecp-9, 0x1.0b5c7977aaf7p-9, -0x1.e0f37daef9127p-11,
+      0x1.388b5fe542a6p-12, -0x1.021a48685e287p-14, 0x1.93d4ba83d34dap-18 },
+  .ln2 = 0x1.62e42fefa39efp-1,
+  .p0 = -0x1.ffffffffffff7p-2,
+  .p1 = 0x1.55555555170d4p-2,
+  .p2 = -0x1.0000000399c27p-2,
+  .p3 = 0x1.999b2e90e94cap-3,
+  .p4 = -0x1.554e550bd501ep-3,
+  .n = 1 << V_LOG_TABLE_BITS,
+  .off = 0x3fe6900900000000
+};
+
+static svfloat64_t NOINLINE
+special_case (svfloat64_t x, svfloat64_t y, svbool_t special)
+{
+  return sv_call_f64 (asinh, x, y, special);
+}
+
+static inline svfloat64_t
+__sv_log_inline (svfloat64_t x, const struct data *d, const svbool_t pg)
+{
+  /* Double-precision SVE log, copied from SVE log implementation with some
+     cosmetic modification and special-cases removed. See that file for details
+     of the algorithm used.  */
+
+  svuint64_t ix = svreinterpret_u64 (x);
+  svuint64_t tmp = svsub_x (pg, ix, d->off);
+  svuint64_t i = svand_x (pg, svlsr_x (pg, tmp, (51 - V_LOG_TABLE_BITS)),
+			  (d->n - 1) << 1);
+  svint64_t k = svasr_x (pg, svreinterpret_s64 (tmp), 52);
+  svuint64_t iz = svsub_x (pg, ix, svand_x (pg, tmp, 0xfffULL << 52));
+  svfloat64_t z = svreinterpret_f64 (iz);
+
+  svfloat64_t invc = svld1_gather_index (pg, &__v_log_data.table[0].invc, i);
+  svfloat64_t logc = svld1_gather_index (pg, &__v_log_data.table[0].logc, i);
+
+  svfloat64_t ln2_p3 = svld1rq (svptrue_b64 (), &d->ln2);
+  svfloat64_t p1_p4 = svld1rq (svptrue_b64 (), &d->p1);
+
+  svfloat64_t r = svmla_x (pg, sv_f64 (-1.0), invc, z);
+  svfloat64_t kd = svcvt_f64_x (pg, k);
+
+  svfloat64_t hi = svmla_lane (svadd_x (pg, logc, r), kd, ln2_p3, 0);
+  svfloat64_t r2 = svmul_x (pg, r, r);
+
+  svfloat64_t y = svmla_lane (sv_f64 (d->p2), r, ln2_p3, 1);
+
+  svfloat64_t p = svmla_lane (sv_f64 (d->p0), r, p1_p4, 0);
+  y = svmla_lane (y, r2, p1_p4, 1);
+  y = svmla_x (pg, p, r2, y);
+  y = svmla_x (pg, hi, r2, y);
+  return y;
+}
+
+/* Double-precision implementation of SVE asinh(x).
+   asinh is very sensitive around 1, so it is impractical to devise a single
+   low-cost algorithm which is sufficiently accurate on a wide range of input.
+   Instead we use two different algorithms:
+   asinh(x) = sign(x) * log(|x| + sqrt(x^2 + 1))     if |x| >= 1
+	    = sign(x) * (|x| + |x|^3 * P(x^2))       otherwise
+   where log(x) is an optimized log approximation, and P(x) is a polynomial
+   shared with the scalar routine. The greatest observed error is 2.51 ULP, in
+   |x| >= 1:
+   _ZGVsMxv_asinh(0x1.170469d024505p+0) got 0x1.e3181c43b0f36p-1
+				       want 0x1.e3181c43b0f39p-1.  */
+svfloat64_t SV_NAME_D1 (asinh) (svfloat64_t x, const svbool_t pg)
+{
+  const struct data *d = ptr_barrier (&data);
+
+  svuint64_t ix = svreinterpret_u64 (x);
+  svuint64_t iax = svbic_x (pg, ix, SignMask);
+  svuint64_t sign = svand_x (pg, ix, SignMask);
+  svfloat64_t ax = svreinterpret_f64 (iax);
+
+  svbool_t ge1 = svcmpge (pg, iax, One);
+  svbool_t special = svcmpge (pg, iax, Thres);
+
+  /* Option 1: |x| >= 1.
+     Compute asinh(x) according to asinh(x) = log(x + sqrt(x^2 + 1)).  */
+  svfloat64_t option_1 = sv_f64 (0);
+  if (__glibc_likely (svptest_any (pg, ge1)))
+    {
+      svfloat64_t x2 = svmul_x (pg, ax, ax);
+      option_1 = __sv_log_inline (
+	  svadd_x (pg, ax, svsqrt_x (pg, svadd_x (pg, x2, 1))), d, pg);
+    }
+
+  /* Option 2: |x| < 1.
+     Compute asinh(x) using a polynomial.
+     The largest observed error in this region is 1.51 ULPs:
+     _ZGVsMxv_asinh(0x1.fe12bf8c616a2p-1) got 0x1.c1e649ee2681bp-1
+					 want 0x1.c1e649ee2681dp-1.  */
+  svfloat64_t option_2 = sv_f64 (0);
+  if (__glibc_likely (svptest_any (pg, svnot_z (pg, ge1))))
+    {
+      svfloat64_t x2 = svmul_x (pg, ax, ax);
+      svfloat64_t x4 = svmul_x (pg, x2, x2);
+      svfloat64_t p = sv_pw_horner_17_f64_x (pg, x2, x4, d->poly);
+      option_2 = svmla_x (pg, ax, p, svmul_x (pg, x2, ax));
+    }
+
+  /* Choose the right option for each lane.  */
+  svfloat64_t y = svsel (ge1, option_1, option_2);
+
+  if (__glibc_unlikely (svptest_any (pg, special)))
+    return special_case (
+	x, svreinterpret_f64 (sveor_x (pg, svreinterpret_u64 (y), sign)),
+	special);
+  return svreinterpret_f64 (sveor_x (pg, svreinterpret_u64 (y), sign));
+}
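
Both files evaluate the 18-coefficient polynomial with a pairwise scheme (Estrin in the AdvSIMD file, pairwise Horner in the SVE file) rather than plain Horner, which shortens the dependency chain. Below is a scalar sketch of the pairwise-Horner form that sv_pw_horner_17_f64_x is assumed to compute per lane; the real helper lives in poly_sve_f64.h.

/* Evaluate sum_{i=0}^{17} c[i] * t^i using pairwise Horner: pairs
   (c[i] + c[i+1] * t) are folded with a Horner recurrence in t^2.
   Scalar sketch of what the SVE helper is assumed to compute per lane.  */
static double
pw_horner_17 (double t, double t2, const double c[18])
{
  double p = c[16] + c[17] * t;
  for (int i = 14; i >= 0; i -= 2)
    p = (c[i] + c[i + 1] * t) + t2 * p;
  return p;
}

In asinh_sve.c the helper is called with t = x^2 and t2 = x^4, so the result is P(x^2) from the algorithm comment above.
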
diff --git a/sysdeps/aarch64/fpu/asinhf_advsimd.c b/sysdeps/aarch64/fpu/asinhf_advsimd.c
new file mode 100644
index 0000000000..09fd8a6143
--- /dev/null
+++ b/sysdeps/aarch64/fpu/asinhf_advsimd.c
@@ -0,0 +1,80 @@ 
+/* Single-precision vector (Advanced SIMD) asinh function
+
+   Copyright (C) 2024 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "v_math.h"
+#include "v_log1pf_inline.h"
+
+#define SignMask v_u32 (0x80000000)
+
+const static struct data
+{
+  struct v_log1pf_data log1pf_consts;
+  uint32x4_t big_bound;
+#if WANT_SIMD_EXCEPT
+  uint32x4_t tiny_bound;
+#endif
+} data = {
+  .log1pf_consts = V_LOG1PF_CONSTANTS_TABLE,
+  .big_bound = V4 (0x5f800000), /* asuint(0x1p64).  */
+#if WANT_SIMD_EXCEPT
+  .tiny_bound = V4 (0x30800000) /* asuint(0x1p-30).  */
+#endif
+};
+
+static float32x4_t NOINLINE VPCS_ATTR
+special_case (float32x4_t x, float32x4_t y, uint32x4_t special)
+{
+  return v_call_f32 (asinhf, x, y, special);
+}
+
+/* Single-precision implementation of vector asinh(x), using vector log1p.
+   Worst-case error is 2.66 ULP, at roughly +/-0.25:
+   __v_asinhf(0x1.01b04p-2) got 0x1.fe163ep-3 want 0x1.fe1638p-3.  */
+VPCS_ATTR float32x4_t NOINLINE V_NAME_F1 (asinh) (float32x4_t x)
+{
+  const struct data *dat = ptr_barrier (&data);
+  uint32x4_t iax = vbicq_u32 (vreinterpretq_u32_f32 (x), SignMask);
+  float32x4_t ax = vreinterpretq_f32_u32 (iax);
+  uint32x4_t special = vcgeq_u32 (iax, dat->big_bound);
+  float32x4_t special_arg = x;
+
+#if WANT_SIMD_EXCEPT
+  /* Sidestep tiny and large values to avoid inadvertently triggering
+     under/overflow.  */
+  special = vorrq_u32 (special, vcltq_u32 (iax, dat->tiny_bound));
+  if (__glibc_unlikely (v_any_u32 (special)))
+    {
+      ax = v_zerofy_f32 (ax, special);
+      x = v_zerofy_f32 (x, special);
+    }
+#endif
+
+  /* asinh(x) = log(x + sqrt(x * x + 1)).
+     For positive x, asinh(x) = log1p(x + x * x / (1 + sqrt(x * x + 1))).  */
+  float32x4_t d
+      = vaddq_f32 (v_f32 (1), vsqrtq_f32 (vfmaq_f32 (v_f32 (1), x, x)));
+  float32x4_t y = log1pf_inline (
+      vaddq_f32 (ax, vdivq_f32 (vmulq_f32 (ax, ax), d)), dat->log1pf_consts);
+
+  if (__glibc_unlikely (v_any_u32 (special)))
+    return special_case (special_arg, vbslq_f32 (SignMask, x, y), special);
+  return vbslq_f32 (SignMask, x, y);
+}
+libmvec_hidden_def (V_NAME_F1 (asinh))
+HALF_WIDTH_ALIAS_F1 (asinh)
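
asinhf_advsimd.c relies on the identity asinh(x) = log1p(x + x^2 / (1 + sqrt(x^2 + 1))) for x >= 0, which avoids cancellation in log(x + sqrt(x^2 + 1)) when x is small. A quick scalar sanity check of that identity against libm's asinhf (not part of the patch; link with -lm):

#include <math.h>
#include <stdio.h>

/* Sanity check: for x >= 0,
   asinh(x) == log1p(x + x*x / (1 + sqrt(x*x + 1))).  */
int
main (void)
{
  for (float x = 0.0f; x < 16.0f; x += 0.37f)
    {
      float via_log1p = log1pf (x + x * x / (1.0f + sqrtf (x * x + 1.0f)));
      printf ("x=%g  asinhf=%a  log1p form=%a\n", x, asinhf (x), via_log1p);
    }
  return 0;
}
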
diff --git a/sysdeps/aarch64/fpu/asinhf_sve.c b/sysdeps/aarch64/fpu/asinhf_sve.c
new file mode 100644
index 0000000000..d85c3a685c
--- /dev/null
+++ b/sysdeps/aarch64/fpu/asinhf_sve.c
@@ -0,0 +1,56 @@ 
+/* Single-precision vector (SVE) asinh function
+
+   Copyright (C) 2024 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "sv_math.h"
+#include "sv_log1pf_inline.h"
+
+#define BigBound (0x5f800000)  /* asuint(0x1p64).  */
+
+static svfloat32_t NOINLINE
+special_case (svfloat32_t x, svfloat32_t y, svbool_t special)
+{
+  return sv_call_f32 (asinhf, x, y, special);
+}
+
+/* Single-precision SVE asinh(x) routine. Implements the same algorithm as
+   vector asinhf and log1p.
+
+   Maximum error is 2.48 ULPs:
+   SV_NAME_F1 (asinh) (0x1.008864p-3) got 0x1.ffbbbcp-4
+				     want 0x1.ffbbb8p-4.  */
+svfloat32_t SV_NAME_F1 (asinh) (svfloat32_t x, const svbool_t pg)
+{
+  svfloat32_t ax = svabs_x (pg, x);
+  svuint32_t iax = svreinterpret_u32 (ax);
+  svuint32_t sign = sveor_x (pg, svreinterpret_u32 (x), iax);
+  svbool_t special = svcmpge (pg, iax, BigBound);
+
+  /* asinh(x) = log(x + sqrt(x * x + 1)).
+     For positive x, asinh(x) = log1p(x + x * x / (1 + sqrt(x * x + 1))).  */
+  svfloat32_t ax2 = svmul_x (pg, ax, ax);
+  svfloat32_t d = svadd_x (pg, svsqrt_x (pg, svadd_x (pg, ax2, 1.0f)), 1.0f);
+  svfloat32_t y
+      = sv_log1pf_inline (svadd_x (pg, ax, svdiv_x (pg, ax2, d)), pg);
+
+  if (__glibc_unlikely (svptest_any (pg, special)))
+    return special_case (
+	x, svreinterpret_f32 (svorr_x (pg, sign, svreinterpret_u32 (y))),
+	special);
+  return svreinterpret_f32 (svorr_x (pg, sign, svreinterpret_u32 (y)));
+}
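
Both single-precision variants compute on |x| and restore the sign bit at the end (vbslq_f32 with SignMask in the AdvSIMD file, svorr of the extracted sign in the SVE file), relying on asinh being odd. In scalar terms the trick is plain bit manipulation; asuint/asfloat are hypothetical helpers added for this sketch.

#include <math.h>
#include <stdint.h>
#include <string.h>

static uint32_t asuint (float x)  { uint32_t u; memcpy (&u, &x, sizeof u); return u; }
static float asfloat (uint32_t u) { float x; memcpy (&x, &u, sizeof x); return x; }

/* Scalar version of the sign handling used above: compute on |x|,
   then OR the original sign bit back into the result.  */
static float
asinhf_signed (float x)
{
  uint32_t ix = asuint (x);
  uint32_t sign = ix & 0x80000000u;
  float ax = asfloat (ix & 0x7fffffffu);
  float y = log1pf (ax + ax * ax / (1.0f + sqrtf (ax * ax + 1.0f)));
  return asfloat (asuint (y) | sign);
}
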
diff --git a/sysdeps/aarch64/fpu/bits/math-vector.h b/sysdeps/aarch64/fpu/bits/math-vector.h
index 841330956c..eb2af35b27 100644
--- a/sysdeps/aarch64/fpu/bits/math-vector.h
+++ b/sysdeps/aarch64/fpu/bits/math-vector.h
@@ -41,6 +41,10 @@ 
 # define __DECL_SIMD_asin __DECL_SIMD_aarch64
 # undef __DECL_SIMD_asinf
 # define __DECL_SIMD_asinf __DECL_SIMD_aarch64
+# undef __DECL_SIMD_asinh
+# define __DECL_SIMD_asinh __DECL_SIMD_aarch64
+# undef __DECL_SIMD_asinhf
+# define __DECL_SIMD_asinhf __DECL_SIMD_aarch64
 # undef __DECL_SIMD_atan
 # define __DECL_SIMD_atan __DECL_SIMD_aarch64
 # undef __DECL_SIMD_atanf
@@ -131,6 +135,7 @@  __vpcs __f32x4_t _ZGVnN4vv_atan2f (__f32x4_t, __f32x4_t);
 __vpcs __f32x4_t _ZGVnN4v_acosf (__f32x4_t);
 __vpcs __f32x4_t _ZGVnN4v_acoshf (__f32x4_t);
 __vpcs __f32x4_t _ZGVnN4v_asinf (__f32x4_t);
+__vpcs __f32x4_t _ZGVnN4v_asinhf (__f32x4_t);
 __vpcs __f32x4_t _ZGVnN4v_atanf (__f32x4_t);
 __vpcs __f32x4_t _ZGVnN4v_cosf (__f32x4_t);
 __vpcs __f32x4_t _ZGVnN4v_coshf (__f32x4_t);
@@ -150,6 +155,7 @@  __vpcs __f64x2_t _ZGVnN2vv_atan2 (__f64x2_t, __f64x2_t);
 __vpcs __f64x2_t _ZGVnN2v_acos (__f64x2_t);
 __vpcs __f64x2_t _ZGVnN2v_acosh (__f64x2_t);
 __vpcs __f64x2_t _ZGVnN2v_asin (__f64x2_t);
+__vpcs __f64x2_t _ZGVnN2v_asinh (__f64x2_t);
 __vpcs __f64x2_t _ZGVnN2v_atan (__f64x2_t);
 __vpcs __f64x2_t _ZGVnN2v_cos (__f64x2_t);
 __vpcs __f64x2_t _ZGVnN2v_cosh (__f64x2_t);
@@ -174,6 +180,7 @@  __sv_f32_t _ZGVsMxvv_atan2f (__sv_f32_t, __sv_f32_t, __sv_bool_t);
 __sv_f32_t _ZGVsMxv_acosf (__sv_f32_t, __sv_bool_t);
 __sv_f32_t _ZGVsMxv_acoshf (__sv_f32_t, __sv_bool_t);
 __sv_f32_t _ZGVsMxv_asinf (__sv_f32_t, __sv_bool_t);
+__sv_f32_t _ZGVsMxv_asinhf (__sv_f32_t, __sv_bool_t);
 __sv_f32_t _ZGVsMxv_atanf (__sv_f32_t, __sv_bool_t);
 __sv_f32_t _ZGVsMxv_cosf (__sv_f32_t, __sv_bool_t);
 __sv_f32_t _ZGVsMxv_coshf (__sv_f32_t, __sv_bool_t);
@@ -193,6 +200,7 @@  __sv_f64_t _ZGVsMxvv_atan2 (__sv_f64_t, __sv_f64_t, __sv_bool_t);
 __sv_f64_t _ZGVsMxv_acos (__sv_f64_t, __sv_bool_t);
 __sv_f64_t _ZGVsMxv_acosh (__sv_f64_t, __sv_bool_t);
 __sv_f64_t _ZGVsMxv_asin (__sv_f64_t, __sv_bool_t);
+__sv_f64_t _ZGVsMxv_asinh (__sv_f64_t, __sv_bool_t);
 __sv_f64_t _ZGVsMxv_atan (__sv_f64_t, __sv_bool_t);
 __sv_f64_t _ZGVsMxv_cos (__sv_f64_t, __sv_bool_t);
 __sv_f64_t _ZGVsMxv_cosh (__sv_f64_t, __sv_bool_t);
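
With the declarations above installed, the new symbols can also be called directly using Advanced SIMD types. A minimal example, assuming glibc 2.40+ and linking with -lmvec -lm; the explicit aarch64_vector_pcs attribute stands in for the header's __vpcs macro:

#include <arm_neon.h>
#include <stdio.h>

/* Direct call into libmvec; normally the declaration comes from <math.h>
   via bits/math-vector.h.  Link with -lmvec -lm on glibc 2.40+.  */
__attribute__ ((aarch64_vector_pcs)) float64x2_t _ZGVnN2v_asinh (float64x2_t);

int
main (void)
{
  float64x2_t x = { 0.5, 2.0 };
  float64x2_t y = _ZGVnN2v_asinh (x);
  printf ("asinh(%g) = %g, asinh(%g) = %g\n",
	  vgetq_lane_f64 (x, 0), vgetq_lane_f64 (y, 0),
	  vgetq_lane_f64 (x, 1), vgetq_lane_f64 (y, 1));
  return 0;
}
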
diff --git a/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c b/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
index f4ce1d7009..3d7177c32d 100644
--- a/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
@@ -26,6 +26,7 @@ 
 VPCS_VECTOR_WRAPPER (acos_advsimd, _ZGVnN2v_acos)
 VPCS_VECTOR_WRAPPER (acosh_advsimd, _ZGVnN2v_acosh)
 VPCS_VECTOR_WRAPPER (asin_advsimd, _ZGVnN2v_asin)
+VPCS_VECTOR_WRAPPER (asinh_advsimd, _ZGVnN2v_asinh)
 VPCS_VECTOR_WRAPPER (atan_advsimd, _ZGVnN2v_atan)
 VPCS_VECTOR_WRAPPER_ff (atan2_advsimd, _ZGVnN2vv_atan2)
 VPCS_VECTOR_WRAPPER (cos_advsimd, _ZGVnN2v_cos)
diff --git a/sysdeps/aarch64/fpu/test-double-sve-wrappers.c b/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
index 0e973cc9d7..b88a2afe5c 100644
--- a/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
@@ -45,6 +45,7 @@ 
 SVE_VECTOR_WRAPPER (acos_sve, _ZGVsMxv_acos)
 SVE_VECTOR_WRAPPER (acosh_sve, _ZGVsMxv_acosh)
 SVE_VECTOR_WRAPPER (asin_sve, _ZGVsMxv_asin)
+SVE_VECTOR_WRAPPER (asinh_sve, _ZGVsMxv_asinh)
 SVE_VECTOR_WRAPPER (atan_sve, _ZGVsMxv_atan)
 SVE_VECTOR_WRAPPER_ff (atan2_sve, _ZGVsMxvv_atan2)
 SVE_VECTOR_WRAPPER (cos_sve, _ZGVsMxv_cos)
diff --git a/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c b/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
index 0ce026b5ea..533655402d 100644
--- a/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
@@ -26,6 +26,7 @@ 
 VPCS_VECTOR_WRAPPER (acosf_advsimd, _ZGVnN4v_acosf)
 VPCS_VECTOR_WRAPPER (acoshf_advsimd, _ZGVnN4v_acoshf)
 VPCS_VECTOR_WRAPPER (asinf_advsimd, _ZGVnN4v_asinf)
+VPCS_VECTOR_WRAPPER (asinhf_advsimd, _ZGVnN4v_asinhf)
 VPCS_VECTOR_WRAPPER (atanf_advsimd, _ZGVnN4v_atanf)
 VPCS_VECTOR_WRAPPER_ff (atan2f_advsimd, _ZGVnN4vv_atan2f)
 VPCS_VECTOR_WRAPPER (cosf_advsimd, _ZGVnN4v_cosf)
diff --git a/sysdeps/aarch64/fpu/test-float-sve-wrappers.c b/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
index 398b7373e8..f7b673e335 100644
--- a/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
@@ -45,6 +45,7 @@ 
 SVE_VECTOR_WRAPPER (acosf_sve, _ZGVsMxv_acosf)
 SVE_VECTOR_WRAPPER (acoshf_sve, _ZGVsMxv_acoshf)
 SVE_VECTOR_WRAPPER (asinf_sve, _ZGVsMxv_asinf)
+SVE_VECTOR_WRAPPER (asinhf_sve, _ZGVsMxv_asinhf)
 SVE_VECTOR_WRAPPER (atanf_sve, _ZGVsMxv_atanf)
 SVE_VECTOR_WRAPPER_ff (atan2f_sve, _ZGVsMxvv_atan2f)
 SVE_VECTOR_WRAPPER (cosf_sve, _ZGVsMxv_cosf)
diff --git a/sysdeps/aarch64/libm-test-ulps b/sysdeps/aarch64/libm-test-ulps
index 3afdd35043..dc8cb08b3a 100644
--- a/sysdeps/aarch64/libm-test-ulps
+++ b/sysdeps/aarch64/libm-test-ulps
@@ -90,11 +90,19 @@  double: 2
 float: 2
 ldouble: 4
 
+Function: "asinh_advsimd":
+double: 1
+float: 2
+
 Function: "asinh_downward":
 double: 3
 float: 3
 ldouble: 4
 
+Function: "asinh_sve":
+double: 1
+float: 2
+
 Function: "asinh_towardzero":
 double: 2
 float: 2
diff --git a/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist b/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist
index f5aaa519f2..f288afdfdd 100644
--- a/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist
+++ b/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist
@@ -75,15 +75,20 @@  GLIBC_2.39 _ZGVsMxvv_atan2 F
 GLIBC_2.39 _ZGVsMxvv_atan2f F
 GLIBC_2.40 _ZGVnN2v_acosh F
 GLIBC_2.40 _ZGVnN2v_acoshf F
+GLIBC_2.40 _ZGVnN2v_asinh F
+GLIBC_2.40 _ZGVnN2v_asinhf F
 GLIBC_2.40 _ZGVnN2v_cosh F
 GLIBC_2.40 _ZGVnN2v_coshf F
 GLIBC_2.40 _ZGVnN2v_erf F
 GLIBC_2.40 _ZGVnN2v_erff F
 GLIBC_2.40 _ZGVnN4v_acoshf F
+GLIBC_2.40 _ZGVnN4v_asinhf F
 GLIBC_2.40 _ZGVnN4v_coshf F
 GLIBC_2.40 _ZGVnN4v_erff F
 GLIBC_2.40 _ZGVsMxv_acosh F
 GLIBC_2.40 _ZGVsMxv_acoshf F
+GLIBC_2.40 _ZGVsMxv_asinh F
+GLIBC_2.40 _ZGVsMxv_asinhf F
 GLIBC_2.40 _ZGVsMxv_cosh F
 GLIBC_2.40 _ZGVsMxv_coshf F
 GLIBC_2.40 _ZGVsMxv_erf F
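
The __DECL_SIMD_asinh/__DECL_SIMD_asinhf declarations are what let the compiler turn scalar asinh calls into the vector symbols added to the abilist. Whether a given loop is actually vectorized that way depends on compiler version and flags (something like -Ofast with a recent GCC targeting AArch64 is assumed here), but a loop of the following shape is the intended candidate:

#include <math.h>

/* Candidate for libmvec vectorization: with suitable flags the compiler
   may replace the scalar asinh call with _ZGVnN2v_asinh / _ZGVsMxv_asinh.  */
void
asinh_array (double *restrict y, const double *restrict x, int n)
{
  for (int i = 0; i < n; i++)
    y[i] = asinh (x[i]);
}
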