[3/5] aarch64: Add vector implementations of log2 routines

Message ID 20231005161052.11878-3-Joe.Ramsay@arm.com
State Committed
Commit a8e3ab3074d448ff3e58ac8f850d955dfed830ad
Series [v2,1/5] aarch64: Add vector implementations of tan routines

Checks

Context Check Description
redhat-pt-bot/TryBot-apply_patch success Patch applied to master at the time it was sent
linaro-tcwg-bot/tcwg_glibc_build--master-arm success Testing passed
linaro-tcwg-bot/tcwg_glibc_check--master-arm success Testing passed
linaro-tcwg-bot/tcwg_glibc_build--master-aarch64 success Testing passed
linaro-tcwg-bot/tcwg_glibc_check--master-aarch64 success Testing passed

Commit Message

Joe Ramsay Oct. 5, 2023, 4:10 p.m. UTC
  A table is also added, which is shared between AdvSIMD and SVE log2.
---
Changes from v1:
* Transpose table layout for zipwise access
* Use half-vectors for AdvSIMD special-case comparison
* Optimise return values
Thanks,
Joe
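
For anyone wanting to exercise the new entry points directly (normally the compiler emits these calls itself when vectorising scalar log2/log2f), here is a minimal caller - illustrative only, assuming a glibc with this patch installed and a toolchain that understands the AArch64 vector PCS attribute; link with -lmvec:

/* Illustrative only: call the new AdvSIMD double-precision entry point.  */
#include <arm_neon.h>
#include <stdio.h>

__attribute__ ((aarch64_vector_pcs)) float64x2_t _ZGVnN2v_log2 (float64x2_t);

int
main (void)
{
  float64x2_t x = { 1.0, 8.0 };
  float64x2_t y = _ZGVnN2v_log2 (x);
  /* Expect 0 and 3.  */
  printf ("%g %g\n", vgetq_lane_f64 (y, 0), vgetq_lane_f64 (y, 1));
  return 0;
}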
 sysdeps/aarch64/fpu/Makefile                  |   4 +-
 sysdeps/aarch64/fpu/Versions                  |   4 +
 sysdeps/aarch64/fpu/bits/math-vector.h        |   4 +
 sysdeps/aarch64/fpu/log2_advsimd.c            | 109 ++++++++++++
 sysdeps/aarch64/fpu/log2_sve.c                |  73 ++++++++
 sysdeps/aarch64/fpu/log2f_advsimd.c           |  77 ++++++++
 sysdeps/aarch64/fpu/log2f_sve.c               |  86 +++++++++
 .../fpu/test-double-advsimd-wrappers.c        |   1 +
 .../aarch64/fpu/test-double-sve-wrappers.c    |   1 +
 .../aarch64/fpu/test-float-advsimd-wrappers.c |   1 +
 sysdeps/aarch64/fpu/test-float-sve-wrappers.c |   1 +
 sysdeps/aarch64/fpu/v_log2_data.c             | 165 ++++++++++++++++++
 sysdeps/aarch64/fpu/vecmath_config.h          |  12 ++
 sysdeps/aarch64/libm-test-ulps                |   8 +
 .../unix/sysv/linux/aarch64/libmvec.abilist   |   4 +
 15 files changed, 549 insertions(+), 1 deletion(-)
 create mode 100644 sysdeps/aarch64/fpu/log2_advsimd.c
 create mode 100644 sysdeps/aarch64/fpu/log2_sve.c
 create mode 100644 sysdeps/aarch64/fpu/log2f_advsimd.c
 create mode 100644 sysdeps/aarch64/fpu/log2f_sve.c
 create mode 100644 sysdeps/aarch64/fpu/v_log2_data.c
  

Patch

diff --git a/sysdeps/aarch64/fpu/Makefile b/sysdeps/aarch64/fpu/Makefile
index 9c7c768301..c3f204ff0d 100644
--- a/sysdeps/aarch64/fpu/Makefile
+++ b/sysdeps/aarch64/fpu/Makefile
@@ -2,6 +2,7 @@  libmvec-supported-funcs = cos \
                           exp \
                           exp2 \
                           log \
+                          log2 \
                           sin \
                           tan
 
@@ -16,7 +17,8 @@  libmvec-support = $(addsuffix f_advsimd,$(float-advsimd-funcs)) \
                   $(addsuffix f_sve,$(float-sve-funcs)) \
                   $(addsuffix _sve,$(double-sve-funcs)) \
                   v_log_data \
-                  v_exp_data
+                  v_exp_data \
+                  v_log2_data
 endif
 
 sve-cflags = -march=armv8-a+sve
diff --git a/sysdeps/aarch64/fpu/Versions b/sysdeps/aarch64/fpu/Versions
index 05de4325d5..ffe62a6f65 100644
--- a/sysdeps/aarch64/fpu/Versions
+++ b/sysdeps/aarch64/fpu/Versions
@@ -22,6 +22,10 @@  libmvec {
     _ZGVnN2v_exp2;
     _ZGVsMxv_exp2f;
     _ZGVsMxv_exp2;
+    _ZGVnN4v_log2f;
+    _ZGVnN2v_log2;
+    _ZGVsMxv_log2f;
+    _ZGVsMxv_log2;
     _ZGVnN4v_tanf;
     _ZGVnN2v_tan;
     _ZGVsMxv_tanf;
diff --git a/sysdeps/aarch64/fpu/bits/math-vector.h b/sysdeps/aarch64/fpu/bits/math-vector.h
index 50921b22e5..92f214b194 100644
--- a/sysdeps/aarch64/fpu/bits/math-vector.h
+++ b/sysdeps/aarch64/fpu/bits/math-vector.h
@@ -53,6 +53,7 @@  __vpcs __f32x4_t _ZGVnN4v_cosf (__f32x4_t);
 __vpcs __f32x4_t _ZGVnN4v_expf (__f32x4_t);
 __vpcs __f32x4_t _ZGVnN4v_exp2f (__f32x4_t);
 __vpcs __f32x4_t _ZGVnN4v_logf (__f32x4_t);
+__vpcs __f32x4_t _ZGVnN4v_log2f (__f32x4_t);
 __vpcs __f32x4_t _ZGVnN4v_sinf (__f32x4_t);
 __vpcs __f32x4_t _ZGVnN4v_tanf (__f32x4_t);
 
@@ -60,6 +61,7 @@  __vpcs __f64x2_t _ZGVnN2v_cos (__f64x2_t);
 __vpcs __f64x2_t _ZGVnN2v_exp (__f64x2_t);
 __vpcs __f64x2_t _ZGVnN2v_exp2 (__f64x2_t);
 __vpcs __f64x2_t _ZGVnN2v_log (__f64x2_t);
+__vpcs __f64x2_t _ZGVnN2v_log2 (__f64x2_t);
 __vpcs __f64x2_t _ZGVnN2v_sin (__f64x2_t);
 __vpcs __f64x2_t _ZGVnN2v_tan (__f64x2_t);
 
@@ -72,6 +74,7 @@  __sv_f32_t _ZGVsMxv_cosf (__sv_f32_t, __sv_bool_t);
 __sv_f32_t _ZGVsMxv_expf (__sv_f32_t, __sv_bool_t);
 __sv_f32_t _ZGVsMxv_exp2f (__sv_f32_t, __sv_bool_t);
 __sv_f32_t _ZGVsMxv_logf (__sv_f32_t, __sv_bool_t);
+__sv_f32_t _ZGVsMxv_log2f (__sv_f32_t, __sv_bool_t);
 __sv_f32_t _ZGVsMxv_sinf (__sv_f32_t, __sv_bool_t);
 __sv_f32_t _ZGVsMxv_tanf (__sv_f32_t, __sv_bool_t);
 
@@ -79,6 +82,7 @@  __sv_f64_t _ZGVsMxv_cos (__sv_f64_t, __sv_bool_t);
 __sv_f64_t _ZGVsMxv_exp (__sv_f64_t, __sv_bool_t);
 __sv_f64_t _ZGVsMxv_exp2 (__sv_f64_t, __sv_bool_t);
 __sv_f64_t _ZGVsMxv_log (__sv_f64_t, __sv_bool_t);
+__sv_f64_t _ZGVsMxv_log2 (__sv_f64_t, __sv_bool_t);
 __sv_f64_t _ZGVsMxv_sin (__sv_f64_t, __sv_bool_t);
 __sv_f64_t _ZGVsMxv_tan (__sv_f64_t, __sv_bool_t);
 
diff --git a/sysdeps/aarch64/fpu/log2_advsimd.c b/sysdeps/aarch64/fpu/log2_advsimd.c
new file mode 100644
index 0000000000..4f29924bfa
--- /dev/null
+++ b/sysdeps/aarch64/fpu/log2_advsimd.c
@@ -0,0 +1,109 @@ 
+/* Double-precision vector (AdvSIMD) log2 function
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "v_math.h"
+#include "poly_advsimd_f64.h"
+
+#define N (1 << V_LOG2_TABLE_BITS)
+
+static const struct data
+{
+  uint64x2_t min_norm;
+  uint32x4_t special_bound;
+  float64x2_t poly[5];
+  float64x2_t invln2;
+  uint64x2_t sign_exp_mask;
+} data = {
+  /* Each coefficient was generated to approximate log(r) for |r| < 0x1.fp-9
+     and N = 128, then scaled by log2(e) in extended precision and rounded back
+     to double precision.  */
+  .poly = { V2 (-0x1.71547652b83p-1), V2 (0x1.ec709dc340953p-2),
+	    V2 (-0x1.71547651c8f35p-2), V2 (0x1.2777ebe12dda5p-2),
+	    V2 (-0x1.ec738d616fe26p-3) },
+  .invln2 = V2 (0x1.71547652b82fep0),
+  .min_norm = V2 (0x0010000000000000), /* asuint64(0x1p-1022).  */
+  .special_bound = V4 (0x7fe00000),    /* asuint64(inf) - min_norm.  */
+  .sign_exp_mask = V2 (0xfff0000000000000),
+};
+
+#define Off v_u64 (0x3fe6900900000000)
+#define IndexMask (N - 1)
+
+struct entry
+{
+  float64x2_t invc;
+  float64x2_t log2c;
+};
+
+static inline struct entry
+lookup (uint64x2_t i)
+{
+  struct entry e;
+  uint64_t i0 = (i[0] >> (52 - V_LOG2_TABLE_BITS)) & IndexMask;
+  uint64_t i1 = (i[1] >> (52 - V_LOG2_TABLE_BITS)) & IndexMask;
+  float64x2_t e0 = vld1q_f64 (&__v_log2_data.table[i0].invc);
+  float64x2_t e1 = vld1q_f64 (&__v_log2_data.table[i1].invc);
+  e.invc = vuzp1q_f64 (e0, e1);
+  e.log2c = vuzp2q_f64 (e0, e1);
+  return e;
+}
+
+static float64x2_t VPCS_ATTR NOINLINE
+special_case (float64x2_t x, float64x2_t y, float64x2_t w, float64x2_t r2,
+	      uint32x2_t special)
+{
+  return v_call_f64 (log2, x, vfmaq_f64 (w, r2, y), vmovl_u32 (special));
+}
+
+/* Double-precision vector log2 routine. Implements the same algorithm as
+   vector log10, with coefficients and table entries scaled in extended
+   precision. The maximum observed error is 2.58 ULP:
+   _ZGVnN2v_log2(0x1.0b556b093869bp+0) got 0x1.fffb34198d9dap-5
+				      want 0x1.fffb34198d9ddp-5.  */
+float64x2_t VPCS_ATTR V_NAME_D1 (log2) (float64x2_t x)
+{
+  const struct data *d = ptr_barrier (&data);
+  uint64x2_t ix = vreinterpretq_u64_f64 (x);
+  uint32x2_t special = vcge_u32 (vsubhn_u64 (ix, d->min_norm),
+				 vget_low_u32 (d->special_bound));
+
+  /* x = 2^k z; where z is in range [Off,2*Off) and exact.
+     The range is split into N subintervals.
+     The ith subinterval contains z and c is near its center.  */
+  uint64x2_t tmp = vsubq_u64 (ix, Off);
+  int64x2_t k = vshrq_n_s64 (vreinterpretq_s64_u64 (tmp), 52);
+  uint64x2_t iz = vsubq_u64 (ix, vandq_u64 (tmp, d->sign_exp_mask));
+  float64x2_t z = vreinterpretq_f64_u64 (iz);
+
+  struct entry e = lookup (tmp);
+
+  /* log2(x) = log1p(z/c-1)/log(2) + log2(c) + k.  */
+
+  float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, e.invc);
+  float64x2_t kd = vcvtq_f64_s64 (k);
+  float64x2_t w = vfmaq_f64 (e.log2c, r, d->invln2);
+
+  float64x2_t r2 = vmulq_f64 (r, r);
+  float64x2_t y = v_pw_horner_4_f64 (r, r2, d->poly);
+  w = vaddq_f64 (kd, w);
+
+  if (__glibc_unlikely (v_any_u32h (special)))
+    return special_case (x, y, w, r2, special);
+  return vfmaq_f64 (w, r2, y);
+}
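
For readers following the comments above, an equivalent scalar sketch of the double-precision path (illustrative only, not part of the patch: special cases are omitted, the operation order of the vector code is simplified, and asuint64/asdouble are local helpers). The constants and table are the ones added in v_log2_data.c further down; the SVE routine below implements the same arithmetic.

#include <math.h>
#include <stdint.h>
#include <string.h>
#include "vecmath_config.h"

static inline uint64_t asuint64 (double x) { uint64_t u; memcpy (&u, &x, 8); return u; }
static inline double asdouble (uint64_t u) { double x; memcpy (&x, &u, 8); return x; }

static double
log2_scalar_sketch (double x)
{
  uint64_t ix = asuint64 (x);
  /* x = 2^k * z, z in [Off, 2*Off), so that |z/c - 1| is small.  */
  uint64_t tmp = ix - 0x3fe6900900000000;
  int64_t k = (int64_t) tmp >> 52;
  double z = asdouble (ix - (tmp & 0xfff0000000000000ULL));
  uint64_t i = (tmp >> (52 - V_LOG2_TABLE_BITS)) & ((1 << V_LOG2_TABLE_BITS) - 1);
  double invc = __v_log2_data.table[i].invc;
  double log2c = __v_log2_data.table[i].log2c;
  /* log2(x) = k + log2(c) + r/ln2 + r^2 * poly(r), with r = z/c - 1.  */
  double r = fma (z, invc, -1.0);
  double r2 = r * r;
  const double *p = __v_log2_data.poly;
  double y = p[0] + r * p[1] + r2 * (p[2] + r * p[3] + r2 * p[4]);
  return fma (r2, y, fma (r, __v_log2_data.invln2, log2c) + (double) k);
}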
diff --git a/sysdeps/aarch64/fpu/log2_sve.c b/sysdeps/aarch64/fpu/log2_sve.c
new file mode 100644
index 0000000000..0ef6669fd5
--- /dev/null
+++ b/sysdeps/aarch64/fpu/log2_sve.c
@@ -0,0 +1,73 @@ 
+/* Double-precision vector (SVE) log2 function
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "sv_math.h"
+#include "poly_sve_f64.h"
+
+#define N (1 << V_LOG2_TABLE_BITS)
+#define Off 0x3fe6900900000000
+#define Max (0x7ff0000000000000)
+#define Min (0x0010000000000000)
+#define Thresh (0x7fe0000000000000) /* Max - Min.  */
+
+static svfloat64_t NOINLINE
+special_case (svfloat64_t x, svfloat64_t y, svbool_t cmp)
+{
+  return sv_call_f64 (log2, x, y, cmp);
+}
+
+/* Double-precision SVE log2 routine.
+   Implements the same algorithm as AdvSIMD log10, with coefficients and table
+   entries scaled in extended precision.
+   The maximum observed error is 2.58 ULP:
+   SV_NAME_D1 (log2)(0x1.0b556b093869bp+0) got 0x1.fffb34198d9dap-5
+					  want 0x1.fffb34198d9ddp-5.  */
+svfloat64_t SV_NAME_D1 (log2) (svfloat64_t x, const svbool_t pg)
+{
+  svuint64_t ix = svreinterpret_u64 (x);
+  svbool_t special = svcmpge (pg, svsub_x (pg, ix, Min), Thresh);
+
+  /* x = 2^k z; where z is in range [Off,2*Off) and exact.
+     The range is split into N subintervals.
+     The ith subinterval contains z and c is near its center.  */
+  svuint64_t tmp = svsub_x (pg, ix, Off);
+  svuint64_t i = svlsr_x (pg, tmp, 51 - V_LOG2_TABLE_BITS);
+  i = svand_x (pg, i, (N - 1) << 1);
+  svfloat64_t k = svcvt_f64_x (pg, svasr_x (pg, svreinterpret_s64 (tmp), 52));
+  svfloat64_t z = svreinterpret_f64 (
+      svsub_x (pg, ix, svand_x (pg, tmp, 0xfffULL << 52)));
+
+  svfloat64_t invc = svld1_gather_index (pg, &__v_log2_data.table[0].invc, i);
+  svfloat64_t log2c
+      = svld1_gather_index (pg, &__v_log2_data.table[0].log2c, i);
+
+  /* log2(x) = log1p(z/c-1)/log(2) + log2(c) + k.  */
+
+  svfloat64_t r = svmad_x (pg, invc, z, -1.0);
+  svfloat64_t w = svmla_x (pg, log2c, r, __v_log2_data.invln2);
+
+  svfloat64_t r2 = svmul_x (pg, r, r);
+  svfloat64_t y = sv_pw_horner_4_f64_x (pg, r, r2, __v_log2_data.poly);
+  w = svadd_x (pg, k, w);
+
+  if (__glibc_unlikely (svptest_any (pg, special)))
+    return special_case (x, svmla_x (svnot_z (pg, special), w, r2, y),
+			 special);
+  return svmla_x (pg, w, r2, y);
+}
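
The lookup here differs slightly from the AdvSIMD version because SVE gathers index in elements from the start of each column. What the index arithmetic and the two gathers above resolve to for a single lane, as an address-level scalar sketch (illustrative only, not part of the patch):

#include <stdint.h>
#include "vecmath_config.h"

static void
sve_lookup_sketch (uint64_t tmp, double *invc, double *log2c)
{
  /* Each table row holds two doubles, so keep the row number pre-doubled:
     shift by 51 - BITS instead of 52 - BITS and mask with (N - 1) << 1.  */
  uint64_t i = (tmp >> (51 - V_LOG2_TABLE_BITS))
	       & (((1 << V_LOG2_TABLE_BITS) - 1) << 1);
  *invc = (&__v_log2_data.table[0].invc)[i];   /* == table[i / 2].invc.   */
  *log2c = (&__v_log2_data.table[0].log2c)[i]; /* == table[i / 2].log2c.  */
}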
diff --git a/sysdeps/aarch64/fpu/log2f_advsimd.c b/sysdeps/aarch64/fpu/log2f_advsimd.c
new file mode 100644
index 0000000000..e913bcda18
--- /dev/null
+++ b/sysdeps/aarch64/fpu/log2f_advsimd.c
@@ -0,0 +1,77 @@ 
+/* Single-precision vector (AdvSIMD) log2 function
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "v_math.h"
+#include "poly_advsimd_f32.h"
+
+static const struct data
+{
+  uint32x4_t min_norm;
+  uint16x8_t special_bound;
+  uint32x4_t off, mantissa_mask;
+  float32x4_t poly[9];
+} data = {
+  /* Coefficients generated using Remez algorithm approximate
+     log2(1+r)/r for r in [ -1/3, 1/3 ].
+     rel error: 0x1.c4c4b0cp-26.  */
+  .poly = { V4 (0x1.715476p0f), /* (float)(1 / ln(2)).  */
+	    V4 (-0x1.715458p-1f), V4 (0x1.ec701cp-2f), V4 (-0x1.7171a4p-2f),
+	    V4 (0x1.27a0b8p-2f), V4 (-0x1.e5143ep-3f), V4 (0x1.9d8ecap-3f),
+	    V4 (-0x1.c675bp-3f), V4 (0x1.9e495p-3f) },
+  .min_norm = V4 (0x00800000),
+  .special_bound = V8 (0x7f00), /* asuint32(inf) - min_norm.  */
+  .off = V4 (0x3f2aaaab),	/* 0.666667.  */
+  .mantissa_mask = V4 (0x007fffff),
+};
+
+static float32x4_t VPCS_ATTR NOINLINE
+special_case (float32x4_t x, float32x4_t n, float32x4_t p, float32x4_t r,
+	      uint16x4_t cmp)
+{
+  /* Fall back to scalar code.  */
+  return v_call_f32 (log2f, x, vfmaq_f32 (n, p, r), vmovl_u16 (cmp));
+}
+
+/* Fast implementation for single precision AdvSIMD log2,
+   relies on same argument reduction as AdvSIMD logf.
+   Maximum error: 2.48 ULPs
+   _ZGVnN4v_log2f(0x1.558174p+0) got 0x1.a9be84p-2
+				want 0x1.a9be8p-2.  */
+float32x4_t VPCS_ATTR V_NAME_F1 (log2) (float32x4_t x)
+{
+  const struct data *d = ptr_barrier (&data);
+  uint32x4_t u = vreinterpretq_u32_f32 (x);
+  uint16x4_t special = vcge_u16 (vsubhn_u32 (u, d->min_norm),
+				 vget_low_u16 (d->special_bound));
+
+  /* x = 2^n * (1+r), where 2/3 < 1+r < 4/3.  */
+  u = vsubq_u32 (u, d->off);
+  float32x4_t n = vcvtq_f32_s32 (
+      vshrq_n_s32 (vreinterpretq_s32_u32 (u), 23)); /* Sign-extend.  */
+  u = vaddq_u32 (vandq_u32 (u, d->mantissa_mask), d->off);
+  float32x4_t r = vsubq_f32 (vreinterpretq_f32_u32 (u), v_f32 (1.0f));
+
+  /* y = log2(1+r) + n.  */
+  float32x4_t r2 = vmulq_f32 (r, r);
+  float32x4_t p = v_pw_horner_8_f32 (r, r2, d->poly);
+
+  if (__glibc_unlikely (v_any_u16h (special)))
+    return special_case (x, n, p, r, special);
+  return vfmaq_f32 (n, p, r);
+}
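
A scalar sketch of the reduction described in the comment above (illustrative only, not part of the patch): subtracting asuint32(2/3) re-biases the exponent so that n is the true exponent and the remaining mantissa bits place 1+r in (2/3, 4/3), i.e. |r| < 1/3.

#include <stdint.h>
#include <string.h>

static float
log2f_reduce_sketch (float x, int *n)
{
  uint32_t u;
  memcpy (&u, &x, 4);
  u -= 0x3f2aaaab;                    /* asuint32 (2/3).  */
  *n = (int32_t) u >> 23;             /* Arithmetic shift sign-extends.  */
  u = (u & 0x007fffff) + 0x3f2aaaab;
  float z;
  memcpy (&z, &u, 4);
  return z - 1.0f;                    /* r; log2f(x) = *n + log2(1 + r).  */
}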
diff --git a/sysdeps/aarch64/fpu/log2f_sve.c b/sysdeps/aarch64/fpu/log2f_sve.c
new file mode 100644
index 0000000000..d00813ee24
--- /dev/null
+++ b/sysdeps/aarch64/fpu/log2f_sve.c
@@ -0,0 +1,86 @@ 
+/* Single-precision vector (SVE) log2 function
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "sv_math.h"
+
+static const struct data
+{
+  float poly_02468[5];
+  float poly_1357[4];
+} data = {
+  .poly_1357 = {
+    /* Coefficients copied from the AdvSIMD routine, then rearranged so that coeffs
+       1, 3, 5 and 7 can be loaded as a single quad-word, hence used with _lane
+       variant of MLA intrinsic.  */
+    -0x1.715458p-1f, -0x1.7171a4p-2f, -0x1.e5143ep-3f, -0x1.c675bp-3f
+  },
+  .poly_02468 = { 0x1.715476p0f, 0x1.ec701cp-2f, 0x1.27a0b8p-2f,
+		  0x1.9d8ecap-3f, 0x1.9e495p-3f },
+};
+
+#define Min (0x00800000)
+#define Max (0x7f800000)
+#define Thres (0x7f000000) /* Max - Min.  */
+#define MantissaMask (0x007fffff)
+#define Off (0x3f2aaaab) /* 0.666667.  */
+
+static svfloat32_t NOINLINE
+special_case (svfloat32_t x, svfloat32_t y, svbool_t cmp)
+{
+  return sv_call_f32 (log2f, x, y, cmp);
+}
+
+/* Optimised implementation of SVE log2f, using the same algorithm
+   and polynomial as AdvSIMD log2f.
+   Maximum error is 2.48 ULPs:
+   SV_NAME_F1 (log2)(0x1.558174p+0) got 0x1.a9be84p-2
+				   want 0x1.a9be8p-2.  */
+svfloat32_t SV_NAME_F1 (log2) (svfloat32_t x, const svbool_t pg)
+{
+  const struct data *d = ptr_barrier (&data);
+
+  svuint32_t u = svreinterpret_u32 (x);
+  svbool_t special = svcmpge (pg, svsub_x (pg, u, Min), Thres);
+
+  /* x = 2^n * (1+r), where 2/3 < 1+r < 4/3.  */
+  u = svsub_x (pg, u, Off);
+  svfloat32_t n = svcvt_f32_x (
+      pg, svasr_x (pg, svreinterpret_s32 (u), 23)); /* Sign-extend.  */
+  u = svand_x (pg, u, MantissaMask);
+  u = svadd_x (pg, u, Off);
+  svfloat32_t r = svsub_x (pg, svreinterpret_f32 (u), 1.0f);
+
+  /* y = log2(1+r) + n.  */
+  svfloat32_t r2 = svmul_x (pg, r, r);
+
+  /* Evaluate polynomial using pairwise Horner scheme.  */
+  svfloat32_t p_1357 = svld1rq (svptrue_b32 (), &d->poly_1357[0]);
+  svfloat32_t q_01 = svmla_lane (sv_f32 (d->poly_02468[0]), r, p_1357, 0);
+  svfloat32_t q_23 = svmla_lane (sv_f32 (d->poly_02468[1]), r, p_1357, 1);
+  svfloat32_t q_45 = svmla_lane (sv_f32 (d->poly_02468[2]), r, p_1357, 2);
+  svfloat32_t q_67 = svmla_lane (sv_f32 (d->poly_02468[3]), r, p_1357, 3);
+  svfloat32_t y = svmla_x (pg, q_67, r2, sv_f32 (d->poly_02468[4]));
+  y = svmla_x (pg, q_45, r2, y);
+  y = svmla_x (pg, q_23, r2, y);
+  y = svmla_x (pg, q_01, r2, y);
+
+  if (__glibc_unlikely (svptest_any (pg, special)))
+    return special_case (x, svmla_x (svnot_z (pg, special), n, r, y), special);
+  return svmla_x (pg, n, r, y);
+}
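
A scalar view of the pairwise (even/odd) Horner evaluation above (illustrative only, not part of the patch). Each odd coefficient multiplies r exactly once and the partial sums are then combined with steps in r^2; keeping c1, c3, c5 and c7 contiguous is what allows the four svmla_lane broadcasts from a single register load.

static float
pw_horner_8_sketch (float r, const float c[9])
{
  float r2 = r * r;
  float q01 = c[0] + c[1] * r;
  float q23 = c[2] + c[3] * r;
  float q45 = c[4] + c[5] * r;
  float q67 = c[6] + c[7] * r;
  float y = q67 + r2 * c[8];
  y = q45 + r2 * y;
  y = q23 + r2 * y;
  return q01 + r2 * y;              /* c0 + c1 r + ... + c8 r^8.  */
}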
diff --git a/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c b/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
index b2b36fd847..d30dcd6f95 100644
--- a/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-double-advsimd-wrappers.c
@@ -27,5 +27,6 @@  VPCS_VECTOR_WRAPPER (cos_advsimd, _ZGVnN2v_cos)
 VPCS_VECTOR_WRAPPER (exp_advsimd, _ZGVnN2v_exp)
 VPCS_VECTOR_WRAPPER (exp2_advsimd, _ZGVnN2v_exp2)
 VPCS_VECTOR_WRAPPER (log_advsimd, _ZGVnN2v_log)
+VPCS_VECTOR_WRAPPER (log2_advsimd, _ZGVnN2v_log2)
 VPCS_VECTOR_WRAPPER (sin_advsimd, _ZGVnN2v_sin)
 VPCS_VECTOR_WRAPPER (tan_advsimd, _ZGVnN2v_tan)
diff --git a/sysdeps/aarch64/fpu/test-double-sve-wrappers.c b/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
index 88b76ed678..22a8479100 100644
--- a/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-double-sve-wrappers.c
@@ -36,5 +36,6 @@  SVE_VECTOR_WRAPPER (cos_sve, _ZGVsMxv_cos)
 SVE_VECTOR_WRAPPER (exp_sve, _ZGVsMxv_exp)
 SVE_VECTOR_WRAPPER (exp2_sve, _ZGVsMxv_exp2)
 SVE_VECTOR_WRAPPER (log_sve, _ZGVsMxv_log)
+SVE_VECTOR_WRAPPER (log2_sve, _ZGVsMxv_log2)
 SVE_VECTOR_WRAPPER (sin_sve, _ZGVsMxv_sin)
 SVE_VECTOR_WRAPPER (tan_sve, _ZGVsMxv_tan)
diff --git a/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c b/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
index 02ab609b5a..e8f7f47c67 100644
--- a/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-float-advsimd-wrappers.c
@@ -27,5 +27,6 @@  VPCS_VECTOR_WRAPPER (cosf_advsimd, _ZGVnN4v_cosf)
 VPCS_VECTOR_WRAPPER (expf_advsimd, _ZGVnN4v_expf)
 VPCS_VECTOR_WRAPPER (exp2f_advsimd, _ZGVnN4v_exp2f)
 VPCS_VECTOR_WRAPPER (logf_advsimd, _ZGVnN4v_logf)
+VPCS_VECTOR_WRAPPER (log2f_advsimd, _ZGVnN4v_log2f)
 VPCS_VECTOR_WRAPPER (sinf_advsimd, _ZGVnN4v_sinf)
 VPCS_VECTOR_WRAPPER (tanf_advsimd, _ZGVnN4v_tanf)
diff --git a/sysdeps/aarch64/fpu/test-float-sve-wrappers.c b/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
index fa41ce09d8..f5e9584265 100644
--- a/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
+++ b/sysdeps/aarch64/fpu/test-float-sve-wrappers.c
@@ -36,5 +36,6 @@  SVE_VECTOR_WRAPPER (cosf_sve, _ZGVsMxv_cosf)
 SVE_VECTOR_WRAPPER (expf_sve, _ZGVsMxv_expf)
 SVE_VECTOR_WRAPPER (exp2f_sve, _ZGVsMxv_exp2f)
 SVE_VECTOR_WRAPPER (logf_sve, _ZGVsMxv_logf)
+SVE_VECTOR_WRAPPER (log2f_sve, _ZGVsMxv_log2f)
 SVE_VECTOR_WRAPPER (sinf_sve, _ZGVsMxv_sinf)
 SVE_VECTOR_WRAPPER (tanf_sve, _ZGVsMxv_tanf)
diff --git a/sysdeps/aarch64/fpu/v_log2_data.c b/sysdeps/aarch64/fpu/v_log2_data.c
new file mode 100644
index 0000000000..4fb126bf31
--- /dev/null
+++ b/sysdeps/aarch64/fpu/v_log2_data.c
@@ -0,0 +1,165 @@ 
+/* Coefficients and table entries for vector log2
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "vecmath_config.h"
+
+#define N (1 << V_LOG2_TABLE_BITS)
+
+const struct v_log2_data __v_log2_data = {
+
+  /* Each coefficient was generated to approximate log(r) for |r| < 0x1.fp-9
+     and N = 128, then scaled by log2(e) in extended precision and rounded back
+     to double precision.  */
+  .poly = { -0x1.71547652b83p-1, 0x1.ec709dc340953p-2, -0x1.71547651c8f35p-2,
+	    0x1.2777ebe12dda5p-2, -0x1.ec738d616fe26p-3 },
+
+  .invln2 = 0x1.71547652b82fep0,
+
+  /* Derived from tables in v_log_data.c in a similar way as v_log10_data.c.
+     This means invc is unchanged and log2c was calculated by scaling log(c) by
+     log2(e) in extended precision and rounding back to double precision.  */
+  .table = { { 0x1.6a133d0dec120p+0, -0x1.00130d57f5fadp-1 },
+	     { 0x1.6815f2f3e42edp+0, -0x1.f802661bd725ep-2 },
+	     { 0x1.661e39be1ac9ep+0, -0x1.efea1c6f73a5bp-2 },
+	     { 0x1.642bfa30ac371p+0, -0x1.e7dd1dcd06f05p-2 },
+	     { 0x1.623f1d916f323p+0, -0x1.dfdb4ae024809p-2 },
+	     { 0x1.60578da220f65p+0, -0x1.d7e484d101958p-2 },
+	     { 0x1.5e75349dea571p+0, -0x1.cff8ad452f6ep-2 },
+	     { 0x1.5c97fd387a75ap+0, -0x1.c817a666c997fp-2 },
+	     { 0x1.5abfd2981f200p+0, -0x1.c04152d640419p-2 },
+	     { 0x1.58eca051dc99cp+0, -0x1.b87595a3f64b2p-2 },
+	     { 0x1.571e526d9df12p+0, -0x1.b0b4526c44d07p-2 },
+	     { 0x1.5554d555b3fcbp+0, -0x1.a8fd6d1a90f5ep-2 },
+	     { 0x1.539015e2a20cdp+0, -0x1.a150ca2559fc6p-2 },
+	     { 0x1.51d0014ee0164p+0, -0x1.99ae4e62cca29p-2 },
+	     { 0x1.50148538cd9eep+0, -0x1.9215df1a1e842p-2 },
+	     { 0x1.4e5d8f9f698a1p+0, -0x1.8a8761fe1f0d9p-2 },
+	     { 0x1.4cab0edca66bep+0, -0x1.8302bd1cc9a54p-2 },
+	     { 0x1.4afcf1a9db874p+0, -0x1.7b87d6fb437f6p-2 },
+	     { 0x1.495327136e16fp+0, -0x1.741696673a86dp-2 },
+	     { 0x1.47ad9e84af28fp+0, -0x1.6caee2b3c6fe4p-2 },
+	     { 0x1.460c47b39ae15p+0, -0x1.6550a3666c27ap-2 },
+	     { 0x1.446f12b278001p+0, -0x1.5dfbc08de02a4p-2 },
+	     { 0x1.42d5efdd720ecp+0, -0x1.56b022766c84ap-2 },
+	     { 0x1.4140cfe001a0fp+0, -0x1.4f6db1c955536p-2 },
+	     { 0x1.3fafa3b421f69p+0, -0x1.4834579063054p-2 },
+	     { 0x1.3e225c9c8ece5p+0, -0x1.4103fd2249a76p-2 },
+	     { 0x1.3c98ec29a211ap+0, -0x1.39dc8c3fe6dabp-2 },
+	     { 0x1.3b13442a413fep+0, -0x1.32bdeed4b5c8fp-2 },
+	     { 0x1.399156baa3c54p+0, -0x1.2ba80f41e20ddp-2 },
+	     { 0x1.38131639b4cdbp+0, -0x1.249ad8332f4a7p-2 },
+	     { 0x1.36987540fbf53p+0, -0x1.1d96347e7f3ebp-2 },
+	     { 0x1.352166b648f61p+0, -0x1.169a0f7d6604ap-2 },
+	     { 0x1.33adddb3eb575p+0, -0x1.0fa654a221909p-2 },
+	     { 0x1.323dcd99fc1d3p+0, -0x1.08baefcf8251ap-2 },
+	     { 0x1.30d129fefc7d2p+0, -0x1.01d7cd14deecdp-2 },
+	     { 0x1.2f67e6b72fe7dp+0, -0x1.f5f9b1ad55495p-3 },
+	     { 0x1.2e01f7cf8b187p+0, -0x1.e853ff76a77afp-3 },
+	     { 0x1.2c9f518ddc86ep+0, -0x1.dabe5d624cba1p-3 },
+	     { 0x1.2b3fe86e5f413p+0, -0x1.cd38a5cef4822p-3 },
+	     { 0x1.29e3b1211b25cp+0, -0x1.bfc2b38d315f9p-3 },
+	     { 0x1.288aa08b373cfp+0, -0x1.b25c61f5edd0fp-3 },
+	     { 0x1.2734abcaa8467p+0, -0x1.a5058d18e9cacp-3 },
+	     { 0x1.25e1c82459b81p+0, -0x1.97be1113e47a3p-3 },
+	     { 0x1.2491eb1ad59c5p+0, -0x1.8a85cafdf5e27p-3 },
+	     { 0x1.23450a54048b5p+0, -0x1.7d5c97e8fc45bp-3 },
+	     { 0x1.21fb1bb09e578p+0, -0x1.704255d6486e4p-3 },
+	     { 0x1.20b415346d8f7p+0, -0x1.6336e2cedd7bfp-3 },
+	     { 0x1.1f6fed179a1acp+0, -0x1.563a1d9b0cc6ap-3 },
+	     { 0x1.1e2e99b93c7b3p+0, -0x1.494be541aaa6fp-3 },
+	     { 0x1.1cf011a7a882ap+0, -0x1.3c6c1964dd0f2p-3 },
+	     { 0x1.1bb44b97dba5ap+0, -0x1.2f9a99f19a243p-3 },
+	     { 0x1.1a7b3e66cdd4fp+0, -0x1.22d747344446p-3 },
+	     { 0x1.1944e11dc56cdp+0, -0x1.1622020d4f7f5p-3 },
+	     { 0x1.18112aebb1a6ep+0, -0x1.097aabb3553f3p-3 },
+	     { 0x1.16e013231b7e9p+0, -0x1.f9c24b48014c5p-4 },
+	     { 0x1.15b1913f156cfp+0, -0x1.e0aaa3bdc858ap-4 },
+	     { 0x1.14859cdedde13p+0, -0x1.c7ae257c952d6p-4 },
+	     { 0x1.135c2dc68cfa4p+0, -0x1.aecc960a03e58p-4 },
+	     { 0x1.12353bdb01684p+0, -0x1.9605bb724d541p-4 },
+	     { 0x1.1110bf25b85b4p+0, -0x1.7d595ca7147cep-4 },
+	     { 0x1.0feeafd2f8577p+0, -0x1.64c74165002d9p-4 },
+	     { 0x1.0ecf062c51c3bp+0, -0x1.4c4f31c86d344p-4 },
+	     { 0x1.0db1baa076c8bp+0, -0x1.33f0f70388258p-4 },
+	     { 0x1.0c96c5bb3048ep+0, -0x1.1bac5abb3037dp-4 },
+	     { 0x1.0b7e20263e070p+0, -0x1.0381272495f21p-4 },
+	     { 0x1.0a67c2acd0ce3p+0, -0x1.d6de4eba2de2ap-5 },
+	     { 0x1.0953a6391e982p+0, -0x1.a6ec4e8156898p-5 },
+	     { 0x1.0841c3caea380p+0, -0x1.772be542e3e1bp-5 },
+	     { 0x1.07321489b13eap+0, -0x1.479cadcde852dp-5 },
+	     { 0x1.062491aee9904p+0, -0x1.183e4265faa5p-5 },
+	     { 0x1.05193497a7cc5p+0, -0x1.d2207fdaa1b85p-6 },
+	     { 0x1.040ff6b5f5e9fp+0, -0x1.742486cb4a6a2p-6 },
+	     { 0x1.0308d19aa6127p+0, -0x1.1687d77cfc299p-6 },
+	     { 0x1.0203beedb0c67p+0, -0x1.7293623a6b5dep-7 },
+	     { 0x1.010037d38bcc2p+0, -0x1.70ec80ec8f25dp-8 },
+	     { 1.0, 0.0 },
+	     { 0x1.fc06d493cca10p-1, 0x1.704c1ca6b6bc9p-7 },
+	     { 0x1.f81e6ac3b918fp-1, 0x1.6eac8ba664beap-6 },
+	     { 0x1.f44546ef18996p-1, 0x1.11e67d040772dp-5 },
+	     { 0x1.f07b10382c84bp-1, 0x1.6bc665e2105dep-5 },
+	     { 0x1.ecbf7070e59d4p-1, 0x1.c4f8a9772bf1dp-5 },
+	     { 0x1.e91213f715939p-1, 0x1.0ebff10fbb951p-4 },
+	     { 0x1.e572a9a75f7b7p-1, 0x1.3aaf4d7805d11p-4 },
+	     { 0x1.e1e0e2c530207p-1, 0x1.664ba81a4d717p-4 },
+	     { 0x1.de5c72d8a8be3p-1, 0x1.9196387da6de4p-4 },
+	     { 0x1.dae50fa5658ccp-1, 0x1.bc902f2b7796p-4 },
+	     { 0x1.d77a71145a2dap-1, 0x1.e73ab5f584f28p-4 },
+	     { 0x1.d41c51166623ep-1, 0x1.08cb78510d232p-3 },
+	     { 0x1.d0ca6ba0bb29fp-1, 0x1.1dd2fe2f0dcb5p-3 },
+	     { 0x1.cd847e8e59681p-1, 0x1.32b4784400df4p-3 },
+	     { 0x1.ca4a499693e00p-1, 0x1.47706f3d49942p-3 },
+	     { 0x1.c71b8e399e821p-1, 0x1.5c0768ee4a4dcp-3 },
+	     { 0x1.c3f80faf19077p-1, 0x1.7079e86fc7c6dp-3 },
+	     { 0x1.c0df92dc2b0ecp-1, 0x1.84c86e1183467p-3 },
+	     { 0x1.bdd1de3cbb542p-1, 0x1.98f377a34b499p-3 },
+	     { 0x1.baceb9e1007a3p-1, 0x1.acfb803bc924bp-3 },
+	     { 0x1.b7d5ef543e55ep-1, 0x1.c0e10098b025fp-3 },
+	     { 0x1.b4e749977d953p-1, 0x1.d4a46efe103efp-3 },
+	     { 0x1.b20295155478ep-1, 0x1.e8463f45b8d0bp-3 },
+	     { 0x1.af279f8e82be2p-1, 0x1.fbc6e3228997fp-3 },
+	     { 0x1.ac5638197fdf3p-1, 0x1.079364f2e5aa8p-2 },
+	     { 0x1.a98e2f102e087p-1, 0x1.1133306010a63p-2 },
+	     { 0x1.a6cf5606d05c1p-1, 0x1.1ac309631bd17p-2 },
+	     { 0x1.a4197fc04d746p-1, 0x1.24432485370c1p-2 },
+	     { 0x1.a16c80293dc01p-1, 0x1.2db3b5449132fp-2 },
+	     { 0x1.9ec82c4dc5bc9p-1, 0x1.3714ee1d7a32p-2 },
+	     { 0x1.9c2c5a491f534p-1, 0x1.406700ab52c94p-2 },
+	     { 0x1.9998e1480b618p-1, 0x1.49aa1d87522b2p-2 },
+	     { 0x1.970d9977c6c2dp-1, 0x1.52de746d7ecb2p-2 },
+	     { 0x1.948a5c023d212p-1, 0x1.5c0434336b343p-2 },
+	     { 0x1.920f0303d6809p-1, 0x1.651b8ad6c90d1p-2 },
+	     { 0x1.8f9b698a98b45p-1, 0x1.6e24a56ab5831p-2 },
+	     { 0x1.8d2f6b81726f6p-1, 0x1.771fb04ec29b1p-2 },
+	     { 0x1.8acae5bb55badp-1, 0x1.800cd6f19c25ep-2 },
+	     { 0x1.886db5d9275b8p-1, 0x1.88ec441df11dfp-2 },
+	     { 0x1.8617ba567c13cp-1, 0x1.91be21b7c93f5p-2 },
+	     { 0x1.83c8d27487800p-1, 0x1.9a8298f8c7454p-2 },
+	     { 0x1.8180de3c5dbe7p-1, 0x1.a339d255c04ddp-2 },
+	     { 0x1.7f3fbe71cdb71p-1, 0x1.abe3f59f43db7p-2 },
+	     { 0x1.7d055498071c1p-1, 0x1.b48129deca9efp-2 },
+	     { 0x1.7ad182e54f65ap-1, 0x1.bd119575364c1p-2 },
+	     { 0x1.78a42c3c90125p-1, 0x1.c5955e23ebcbcp-2 },
+	     { 0x1.767d342f76944p-1, 0x1.ce0ca8f4e1557p-2 },
+	     { 0x1.745c7ef26b00ap-1, 0x1.d6779a5a75774p-2 },
+	     { 0x1.7241f15769d0fp-1, 0x1.ded6563550d27p-2 },
+	     { 0x1.702d70d396e41p-1, 0x1.e728ffafd840ep-2 },
+	     { 0x1.6e1ee3700cd11p-1, 0x1.ef6fb96c8d739p-2 },
+	     { 0x1.6c162fc9cbe02p-1, 0x1.f7aaa57907219p-2 } }
+};
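
Illustrative only, not part of the patch: the relation described in the comment at the top of this file. Given log(c) from the corresponding v_log_data.c entry, the log2c column is that value scaled by log2(e) = 1/ln(2) in more than double precision, then rounded back to double:

#include <math.h>

static double
log2c_from_logc (long double logc)
{
  return (double) (logc * (1.0L / logl (2.0L)));
}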
diff --git a/sysdeps/aarch64/fpu/vecmath_config.h b/sysdeps/aarch64/fpu/vecmath_config.h
index 0abfd8b701..3aa6c280aa 100644
--- a/sysdeps/aarch64/fpu/vecmath_config.h
+++ b/sysdeps/aarch64/fpu/vecmath_config.h
@@ -50,4 +50,16 @@  extern const struct v_log_data
 
 #define V_EXP_TABLE_BITS 7
 extern const uint64_t __v_exp_data[1 << V_EXP_TABLE_BITS] attribute_hidden;
+
+#define V_LOG2_TABLE_BITS 7
+extern const struct v_log2_data
+{
+  double poly[5];
+  double invln2;
+  struct
+  {
+    double invc, log2c;
+  } table[1 << V_LOG2_TABLE_BITS];
+} __v_log2_data attribute_hidden;
+
 #endif
diff --git a/sysdeps/aarch64/libm-test-ulps b/sysdeps/aarch64/libm-test-ulps
index a1e5651c87..7a5af571e2 100644
--- a/sysdeps/aarch64/libm-test-ulps
+++ b/sysdeps/aarch64/libm-test-ulps
@@ -1220,11 +1220,19 @@  double: 1
 float: 1
 ldouble: 3
 
+Function: "log2_advsimd":
+double: 1
+float: 2
+
 Function: "log2_downward":
 double: 3
 float: 3
 ldouble: 3
 
+Function: "log2_sve":
+double: 1
+float: 2
+
 Function: "log2_towardzero":
 double: 2
 float: 2
diff --git a/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist b/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist
index 6046c3d046..657edab7ae 100644
--- a/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist
+++ b/sysdeps/unix/sysv/linux/aarch64/libmvec.abilist
@@ -15,10 +15,14 @@  GLIBC_2.38 _ZGVsMxv_logf F
 GLIBC_2.38 _ZGVsMxv_sin F
 GLIBC_2.38 _ZGVsMxv_sinf F
 GLIBC_2.39 _ZGVnN2v_exp2 F
+GLIBC_2.39 _ZGVnN2v_log2 F
 GLIBC_2.39 _ZGVnN2v_tan F
 GLIBC_2.39 _ZGVnN4v_exp2f F
+GLIBC_2.39 _ZGVnN4v_log2f F
 GLIBC_2.39 _ZGVnN4v_tanf F
 GLIBC_2.39 _ZGVsMxv_exp2 F
 GLIBC_2.39 _ZGVsMxv_exp2f F
+GLIBC_2.39 _ZGVsMxv_log2 F
+GLIBC_2.39 _ZGVsMxv_log2f F
 GLIBC_2.39 _ZGVsMxv_tan F
 GLIBC_2.39 _ZGVsMxv_tanf F