[v2,33/36] arm: [MVE intrinsics] rework vadciq

Message ID 20240904132650.2720446-34-christophe.lyon@linaro.org
State Committed
Commit cb21ceae313825d01e72726e18ab213e34c6a7b2
Delegated to: Richard Earnshaw
Headers
Series arm: [MVE intrinsics] Re-implement more intrinsics |

Commit Message

Christophe Lyon Sept. 4, 2024, 1:26 p.m. UTC
  Implement vadciq using the new MVE builtins framework.

2024-08-28  Christophe Lyon  <christophe.lyon@linaro.org>

	gcc/
	* config/arm/arm-mve-builtins-base.cc (class vadc_vsbc_impl): New.
	(vadciq): New.
	* config/arm/arm-mve-builtins-base.def (vadciq): New.
	* config/arm/arm-mve-builtins-base.h (vadciq): New.
	* config/arm/arm_mve.h (vadciq): Delete.
	(vadciq_m): Delete.
	(vadciq_s32): Delete.
	(vadciq_u32): Delete.
	(vadciq_m_s32): Delete.
	(vadciq_m_u32): Delete.
	(__arm_vadciq_s32): Delete.
	(__arm_vadciq_u32): Delete.
	(__arm_vadciq_m_s32): Delete.
	(__arm_vadciq_m_u32): Delete.
	(__arm_vadciq): Delete.
	(__arm_vadciq_m): Delete.
---
 gcc/config/arm/arm-mve-builtins-base.cc  | 93 ++++++++++++++++++++++++
 gcc/config/arm/arm-mve-builtins-base.def |  1 +
 gcc/config/arm/arm-mve-builtins-base.h   |  1 +
 gcc/config/arm/arm_mve.h                 | 89 -----------------------
 4 files changed, 95 insertions(+), 89 deletions(-)
  

Comments

Richard Earnshaw (lists) Oct. 14, 2024, 6:04 p.m. UTC | #1
On 04/09/2024 14:26, Christophe Lyon wrote:
> Implement vadciq using the new MVE builtins framework.
> 
> 2024-08-28  Christophe Lyon  <christophe.lyon@linaro.org>
> 	gcc/
> 
> 	* config/arm/arm-mve-builtins-base.cc (class vadc_vsbc_impl): New.
> 	(vadciq): New.
> 	* config/arm/arm-mve-builtins-base.def (vadciq): New.
> 	* config/arm/arm-mve-builtins-base.h (vadciq): New.
> 	* config/arm/arm_mve.h (vadciq): Delete.
> 	(vadciq_m): Delete.
> 	(vadciq_s32): Delete.
> 	(vadciq_u32): Delete.
> 	(vadciq_m_s32): Delete.
> 	(vadciq_m_u32): Delete.
> 	(__arm_vadciq_s32): Delete.
> 	(__arm_vadciq_u32): Delete.
> 	(__arm_vadciq_m_s32): Delete.
> 	(__arm_vadciq_m_u32): Delete.
> 	(__arm_vadciq): Delete.
> 	(__arm_vadciq_m): Delete.

OK.

R.

> ---
>  gcc/config/arm/arm-mve-builtins-base.cc  | 93 ++++++++++++++++++++++++
>  gcc/config/arm/arm-mve-builtins-base.def |  1 +
>  gcc/config/arm/arm-mve-builtins-base.h   |  1 +
>  gcc/config/arm/arm_mve.h                 | 89 -----------------------
>  4 files changed, 95 insertions(+), 89 deletions(-)
> 
> diff --git a/gcc/config/arm/arm-mve-builtins-base.cc b/gcc/config/arm/arm-mve-builtins-base.cc
> index 9f1f7e69c57..6f3b18c2915 100644
> --- a/gcc/config/arm/arm-mve-builtins-base.cc
> +++ b/gcc/config/arm/arm-mve-builtins-base.cc
> @@ -554,6 +554,98 @@ public:
>    }
>  };
>  
> +/* Map the vadc and similar functions directly to CODE (UNSPEC, UNSPEC).  Take
> +   care of the implicit carry argument.  */
> +class vadc_vsbc_impl : public function_base
> +{
> +public:
> +  unsigned int
> +  call_properties (const function_instance &) const override
> +  {
> +    unsigned int flags = CP_WRITE_MEMORY | CP_READ_FPCR;
> +    return flags;
> +  }
> +
> +  tree
> +  memory_scalar_type (const function_instance &) const override
> +  {
> +    /* carry is "unsigned int".  */
> +    return get_typenode_from_name ("unsigned int");
> +  }
> +
> +  rtx
> +  expand (function_expander &e) const override
> +  {
> +    insn_code code;
> +    rtx insns, carry_ptr, carry_out;
> +    int carry_out_arg_no;
> +    int unspec;
> +
> +    if (! e.type_suffix (0).integer_p)
> +      gcc_unreachable ();
> +
> +    if (e.mode_suffix_id != MODE_none)
> +      gcc_unreachable ();
> +
> +    /* Remove carry from arguments, it is implicit for the builtin.  */
> +    switch (e.pred)
> +      {
> +      case PRED_none:
> +	carry_out_arg_no = 2;
> +	break;
> +
> +      case PRED_m:
> +	carry_out_arg_no = 3;
> +	break;
> +
> +      default:
> +	gcc_unreachable ();
> +      }
> +
> +    carry_ptr = e.args[carry_out_arg_no];
> +    e.args.ordered_remove (carry_out_arg_no);
> +
> +    switch (e.pred)
> +      {
> +      case PRED_none:
> +	/* No predicate.  */
> +	unspec = e.type_suffix (0).unsigned_p
> +	  ? VADCIQ_U
> +	  : VADCIQ_S;
> +	code = code_for_mve_q_v4si (unspec, unspec);
> +	insns = e.use_exact_insn (code);
> +	break;
> +
> +      case PRED_m:
> +	/* "m" predicate.  */
> +	unspec = e.type_suffix (0).unsigned_p
> +	  ? VADCIQ_M_U
> +	  : VADCIQ_M_S;
> +	code = code_for_mve_q_m_v4si (unspec, unspec);
> +	insns = e.use_cond_insn (code, 0);
> +	break;
> +
> +      default:
> +	gcc_unreachable ();
> +      }
> +
> +    /* Update carry_out.  */
> +    carry_out = gen_reg_rtx (SImode);
> +    emit_insn (gen_get_fpscr_nzcvqc (carry_out));
> +    emit_insn (gen_rtx_SET (carry_out,
> +			    gen_rtx_LSHIFTRT (SImode,
> +					      carry_out,
> +					      GEN_INT (29))));
> +    emit_insn (gen_rtx_SET (carry_out,
> +			    gen_rtx_AND (SImode,
> +					 carry_out,
> +					 GEN_INT (1))));
> +    emit_insn (gen_rtx_SET (gen_rtx_MEM (Pmode, carry_ptr), carry_out));
> +
> +    return insns;
> +  }
> +};
> +
>  } /* end anonymous namespace */
>  
>  namespace arm_mve {
> @@ -724,6 +816,7 @@ namespace arm_mve {
>  FUNCTION_PRED_P_S_U (vabavq, VABAVQ)
>  FUNCTION_WITHOUT_N (vabdq, VABDQ)
>  FUNCTION (vabsq, unspec_based_mve_function_exact_insn, (ABS, ABS, ABS, -1, -1, -1, VABSQ_M_S, -1, VABSQ_M_F, -1, -1, -1))
> +FUNCTION (vadciq, vadc_vsbc_impl,)
>  FUNCTION_WITH_RTX_M_N (vaddq, PLUS, VADDQ)
>  FUNCTION_PRED_P_S_U (vaddlvaq, VADDLVAQ)
>  FUNCTION_PRED_P_S_U (vaddlvq, VADDLVQ)
> diff --git a/gcc/config/arm/arm-mve-builtins-base.def b/gcc/config/arm/arm-mve-builtins-base.def
> index bd69f06d7e4..72d6461c4e4 100644
> --- a/gcc/config/arm/arm-mve-builtins-base.def
> +++ b/gcc/config/arm/arm-mve-builtins-base.def
> @@ -21,6 +21,7 @@
>  DEF_MVE_FUNCTION (vabavq, binary_acca_int32, all_integer, p_or_none)
>  DEF_MVE_FUNCTION (vabdq, binary, all_integer, mx_or_none)
>  DEF_MVE_FUNCTION (vabsq, unary, all_signed, mx_or_none)
> +DEF_MVE_FUNCTION (vadciq, vadc_vsbc, integer_32, m_or_none)
>  DEF_MVE_FUNCTION (vaddlvaq, unary_widen_acc, integer_32, p_or_none)
>  DEF_MVE_FUNCTION (vaddlvq, unary_acc, integer_32, p_or_none)
>  DEF_MVE_FUNCTION (vaddq, binary_opt_n, all_integer, mx_or_none)
> diff --git a/gcc/config/arm/arm-mve-builtins-base.h b/gcc/config/arm/arm-mve-builtins-base.h
> index 1eff50d3c6d..2dfc2e18062 100644
> --- a/gcc/config/arm/arm-mve-builtins-base.h
> +++ b/gcc/config/arm/arm-mve-builtins-base.h
> @@ -26,6 +26,7 @@ namespace functions {
>  extern const function_base *const vabavq;
>  extern const function_base *const vabdq;
>  extern const function_base *const vabsq;
> +extern const function_base *const vadciq;
>  extern const function_base *const vaddlvaq;
>  extern const function_base *const vaddlvq;
>  extern const function_base *const vaddq;
> diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h
> index c577c373e98..3a0b3041c42 100644
> --- a/gcc/config/arm/arm_mve.h
> +++ b/gcc/config/arm/arm_mve.h
> @@ -85,8 +85,6 @@
>  #define vstrdq_scatter_base_wb_p(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_wb_p(__addr, __offset, __value, __p)
>  #define vstrwq_scatter_base_wb_p(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p(__addr, __offset, __value, __p)
>  #define vstrwq_scatter_base_wb(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb(__addr, __offset, __value)
> -#define vadciq(__a, __b, __carry_out) __arm_vadciq(__a, __b, __carry_out)
> -#define vadciq_m(__inactive, __a, __b, __carry_out, __p) __arm_vadciq_m(__inactive, __a, __b, __carry_out, __p)
>  #define vadcq(__a, __b, __carry) __arm_vadcq(__a, __b, __carry)
>  #define vadcq_m(__inactive, __a, __b, __carry, __p) __arm_vadcq_m(__inactive, __a, __b, __carry, __p)
>  #define vsbciq(__a, __b, __carry_out) __arm_vsbciq(__a, __b, __carry_out)
> @@ -321,10 +319,6 @@
>  #define vstrwq_scatter_base_wb_s32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_s32(__addr, __offset, __value)
>  #define vstrwq_scatter_base_wb_u32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_u32(__addr, __offset, __value)
>  #define vstrwq_scatter_base_wb_f32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_f32(__addr, __offset, __value)
> -#define vadciq_s32(__a, __b,  __carry_out) __arm_vadciq_s32(__a, __b,  __carry_out)
> -#define vadciq_u32(__a, __b,  __carry_out) __arm_vadciq_u32(__a, __b,  __carry_out)
> -#define vadciq_m_s32(__inactive, __a, __b,  __carry_out, __p) __arm_vadciq_m_s32(__inactive, __a, __b,  __carry_out, __p)
> -#define vadciq_m_u32(__inactive, __a, __b,  __carry_out, __p) __arm_vadciq_m_u32(__inactive, __a, __b,  __carry_out, __p)
>  #define vadcq_s32(__a, __b,  __carry) __arm_vadcq_s32(__a, __b,  __carry)
>  #define vadcq_u32(__a, __b,  __carry) __arm_vadcq_u32(__a, __b,  __carry)
>  #define vadcq_m_s32(__inactive, __a, __b,  __carry, __p) __arm_vadcq_m_s32(__inactive, __a, __b,  __carry, __p)
> @@ -1690,42 +1684,6 @@ __arm_vstrwq_scatter_base_wb_u32 (uint32x4_t * __addr, const int __offset, uint3
>    *__addr = __builtin_mve_vstrwq_scatter_base_wb_uv4si (*__addr, __offset, __value);
>  }
>  
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vadciq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry_out)
> -{
> -  int32x4_t __res = __builtin_mve_vadciq_sv4si (__a, __b);
> -  *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
> -  return __res;
> -}
> -
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vadciq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out)
> -{
> -  uint32x4_t __res = __builtin_mve_vadciq_uv4si (__a, __b);
> -  *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
> -  return __res;
> -}
> -
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vadciq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
> -{
> -  int32x4_t __res =  __builtin_mve_vadciq_m_sv4si (__inactive, __a, __b, __p);
> -  *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
> -  return __res;
> -}
> -
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vadciq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
> -{
> -  uint32x4_t __res = __builtin_mve_vadciq_m_uv4si (__inactive, __a, __b, __p);
> -  *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
> -  return __res;
> -}
> -
>  __extension__ extern __inline int32x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vadcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry)
> @@ -3642,34 +3600,6 @@ __arm_vstrwq_scatter_base_wb (uint32x4_t * __addr, const int __offset, uint32x4_
>   __arm_vstrwq_scatter_base_wb_u32 (__addr, __offset, __value);
>  }
>  
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vadciq (int32x4_t __a, int32x4_t __b, unsigned * __carry_out)
> -{
> - return __arm_vadciq_s32 (__a, __b, __carry_out);
> -}
> -
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vadciq (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out)
> -{
> - return __arm_vadciq_u32 (__a, __b, __carry_out);
> -}
> -
> -__extension__ extern __inline int32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vadciq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
> -{
> - return __arm_vadciq_m_s32 (__inactive, __a, __b, __carry_out, __p);
> -}
> -
> -__extension__ extern __inline uint32x4_t
> -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> -__arm_vadciq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
> -{
> - return __arm_vadciq_m_u32 (__inactive, __a, __b, __carry_out, __p);
> -}
> -
>  __extension__ extern __inline int32x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  __arm_vadcq (int32x4_t __a, int32x4_t __b, unsigned * __carry)
> @@ -5289,12 +5219,6 @@ extern void *__ARM_undef;
>  #endif /* MVE Integer.  */
>  
>  
> -#define __arm_vadciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
> -  __typeof(p1) __p1 = (p1); \
> -  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
> -  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
> -  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
> -
>  #define __arm_vstrdq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
>    _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
>    int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_wb_p_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
> @@ -5321,19 +5245,6 @@ extern void *__ARM_undef;
>    int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_shifted_offset_z_s64 (__ARM_mve_coerce_s64_ptr(p0, int64_t *), p1, p2), \
>    int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_shifted_offset_z_u64 (__ARM_mve_coerce_u64_ptr(p0, uint64_t *), p1, p2)))
>  
> -#define __arm_vadciq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
> -  __typeof(p1) __p1 = (p1); \
> -  __typeof(p2) __p2 = (p2); \
> -  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
> -  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \
> -  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));})
> -
> -#define __arm_vadciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
> -  __typeof(p1) __p1 = (p1); \
> -  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
> -  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
> -  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
> -
>  #define __arm_vadcq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
>    __typeof(p1) __p1 = (p1); \
>    __typeof(p2) __p2 = (p2); \
  

Patch

diff --git a/gcc/config/arm/arm-mve-builtins-base.cc b/gcc/config/arm/arm-mve-builtins-base.cc
index 9f1f7e69c57..6f3b18c2915 100644
--- a/gcc/config/arm/arm-mve-builtins-base.cc
+++ b/gcc/config/arm/arm-mve-builtins-base.cc
@@ -554,6 +554,98 @@  public:
   }
 };
 
+/* Map the vadc and similar functions directly to CODE (UNSPEC, UNSPEC).  Take
+   care of the implicit carry argument.  */
+class vadc_vsbc_impl : public function_base
+{
+public:
+  unsigned int
+  call_properties (const function_instance &) const override
+  {
+    unsigned int flags = CP_WRITE_MEMORY | CP_READ_FPCR;
+    return flags;
+  }
+
+  tree
+  memory_scalar_type (const function_instance &) const override
+  {
+    /* carry is "unsigned int".  */
+    return get_typenode_from_name ("unsigned int");
+  }
+
+  rtx
+  expand (function_expander &e) const override
+  {
+    insn_code code;
+    rtx insns, carry_ptr, carry_out;
+    int carry_out_arg_no;
+    int unspec;
+
+    if (! e.type_suffix (0).integer_p)
+      gcc_unreachable ();
+
+    if (e.mode_suffix_id != MODE_none)
+      gcc_unreachable ();
+
+    /* Remove carry from arguments, it is implicit for the builtin.  */
+    switch (e.pred)
+      {
+      case PRED_none:
+	carry_out_arg_no = 2;
+	break;
+
+      case PRED_m:
+	carry_out_arg_no = 3;
+	break;
+
+      default:
+	gcc_unreachable ();
+      }
+
+    carry_ptr = e.args[carry_out_arg_no];
+    e.args.ordered_remove (carry_out_arg_no);
+
+    switch (e.pred)
+      {
+      case PRED_none:
+	/* No predicate.  */
+	unspec = e.type_suffix (0).unsigned_p
+	  ? VADCIQ_U
+	  : VADCIQ_S;
+	code = code_for_mve_q_v4si (unspec, unspec);
+	insns = e.use_exact_insn (code);
+	break;
+
+      case PRED_m:
+	/* "m" predicate.  */
+	unspec = e.type_suffix (0).unsigned_p
+	  ? VADCIQ_M_U
+	  : VADCIQ_M_S;
+	code = code_for_mve_q_m_v4si (unspec, unspec);
+	insns = e.use_cond_insn (code, 0);
+	break;
+
+      default:
+	gcc_unreachable ();
+      }
+
+    /* Update carry_out.  */
+    carry_out = gen_reg_rtx (SImode);
+    emit_insn (gen_get_fpscr_nzcvqc (carry_out));
+    emit_insn (gen_rtx_SET (carry_out,
+			    gen_rtx_LSHIFTRT (SImode,
+					      carry_out,
+					      GEN_INT (29))));
+    emit_insn (gen_rtx_SET (carry_out,
+			    gen_rtx_AND (SImode,
+					 carry_out,
+					 GEN_INT (1))));
+    emit_insn (gen_rtx_SET (gen_rtx_MEM (Pmode, carry_ptr), carry_out));
+
+    return insns;
+  }
+};
+
 } /* end anonymous namespace */
 
 namespace arm_mve {
@@ -724,6 +816,7 @@  namespace arm_mve {
 FUNCTION_PRED_P_S_U (vabavq, VABAVQ)
 FUNCTION_WITHOUT_N (vabdq, VABDQ)
 FUNCTION (vabsq, unspec_based_mve_function_exact_insn, (ABS, ABS, ABS, -1, -1, -1, VABSQ_M_S, -1, VABSQ_M_F, -1, -1, -1))
+FUNCTION (vadciq, vadc_vsbc_impl,)
 FUNCTION_WITH_RTX_M_N (vaddq, PLUS, VADDQ)
 FUNCTION_PRED_P_S_U (vaddlvaq, VADDLVAQ)
 FUNCTION_PRED_P_S_U (vaddlvq, VADDLVQ)
diff --git a/gcc/config/arm/arm-mve-builtins-base.def b/gcc/config/arm/arm-mve-builtins-base.def
index bd69f06d7e4..72d6461c4e4 100644
--- a/gcc/config/arm/arm-mve-builtins-base.def
+++ b/gcc/config/arm/arm-mve-builtins-base.def
@@ -21,6 +21,7 @@ 
 DEF_MVE_FUNCTION (vabavq, binary_acca_int32, all_integer, p_or_none)
 DEF_MVE_FUNCTION (vabdq, binary, all_integer, mx_or_none)
 DEF_MVE_FUNCTION (vabsq, unary, all_signed, mx_or_none)
+DEF_MVE_FUNCTION (vadciq, vadc_vsbc, integer_32, m_or_none)
 DEF_MVE_FUNCTION (vaddlvaq, unary_widen_acc, integer_32, p_or_none)
 DEF_MVE_FUNCTION (vaddlvq, unary_acc, integer_32, p_or_none)
 DEF_MVE_FUNCTION (vaddq, binary_opt_n, all_integer, mx_or_none)
diff --git a/gcc/config/arm/arm-mve-builtins-base.h b/gcc/config/arm/arm-mve-builtins-base.h
index 1eff50d3c6d..2dfc2e18062 100644
--- a/gcc/config/arm/arm-mve-builtins-base.h
+++ b/gcc/config/arm/arm-mve-builtins-base.h
@@ -26,6 +26,7 @@  namespace functions {
 extern const function_base *const vabavq;
 extern const function_base *const vabdq;
 extern const function_base *const vabsq;
+extern const function_base *const vadciq;
 extern const function_base *const vaddlvaq;
 extern const function_base *const vaddlvq;
 extern const function_base *const vaddq;
diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h
index c577c373e98..3a0b3041c42 100644
--- a/gcc/config/arm/arm_mve.h
+++ b/gcc/config/arm/arm_mve.h
@@ -85,8 +85,6 @@ 
 #define vstrdq_scatter_base_wb_p(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_wb_p(__addr, __offset, __value, __p)
 #define vstrwq_scatter_base_wb_p(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p(__addr, __offset, __value, __p)
 #define vstrwq_scatter_base_wb(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb(__addr, __offset, __value)
-#define vadciq(__a, __b, __carry_out) __arm_vadciq(__a, __b, __carry_out)
-#define vadciq_m(__inactive, __a, __b, __carry_out, __p) __arm_vadciq_m(__inactive, __a, __b, __carry_out, __p)
 #define vadcq(__a, __b, __carry) __arm_vadcq(__a, __b, __carry)
 #define vadcq_m(__inactive, __a, __b, __carry, __p) __arm_vadcq_m(__inactive, __a, __b, __carry, __p)
 #define vsbciq(__a, __b, __carry_out) __arm_vsbciq(__a, __b, __carry_out)
@@ -321,10 +319,6 @@ 
 #define vstrwq_scatter_base_wb_s32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_s32(__addr, __offset, __value)
 #define vstrwq_scatter_base_wb_u32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_u32(__addr, __offset, __value)
 #define vstrwq_scatter_base_wb_f32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_f32(__addr, __offset, __value)
-#define vadciq_s32(__a, __b,  __carry_out) __arm_vadciq_s32(__a, __b,  __carry_out)
-#define vadciq_u32(__a, __b,  __carry_out) __arm_vadciq_u32(__a, __b,  __carry_out)
-#define vadciq_m_s32(__inactive, __a, __b,  __carry_out, __p) __arm_vadciq_m_s32(__inactive, __a, __b,  __carry_out, __p)
-#define vadciq_m_u32(__inactive, __a, __b,  __carry_out, __p) __arm_vadciq_m_u32(__inactive, __a, __b,  __carry_out, __p)
 #define vadcq_s32(__a, __b,  __carry) __arm_vadcq_s32(__a, __b,  __carry)
 #define vadcq_u32(__a, __b,  __carry) __arm_vadcq_u32(__a, __b,  __carry)
 #define vadcq_m_s32(__inactive, __a, __b,  __carry, __p) __arm_vadcq_m_s32(__inactive, __a, __b,  __carry, __p)
@@ -1690,42 +1684,6 @@  __arm_vstrwq_scatter_base_wb_u32 (uint32x4_t * __addr, const int __offset, uint3
   *__addr = __builtin_mve_vstrwq_scatter_base_wb_uv4si (*__addr, __offset, __value);
 }
 
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vadciq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry_out)
-{
-  int32x4_t __res = __builtin_mve_vadciq_sv4si (__a, __b);
-  *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
-  return __res;
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vadciq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out)
-{
-  uint32x4_t __res = __builtin_mve_vadciq_uv4si (__a, __b);
-  *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
-  return __res;
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vadciq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
-{
-  int32x4_t __res =  __builtin_mve_vadciq_m_sv4si (__inactive, __a, __b, __p);
-  *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
-  return __res;
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vadciq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
-{
-  uint32x4_t __res = __builtin_mve_vadciq_m_uv4si (__inactive, __a, __b, __p);
-  *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
-  return __res;
-}
-
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vadcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry)
@@ -3642,34 +3600,6 @@  __arm_vstrwq_scatter_base_wb (uint32x4_t * __addr, const int __offset, uint32x4_
  __arm_vstrwq_scatter_base_wb_u32 (__addr, __offset, __value);
 }
 
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vadciq (int32x4_t __a, int32x4_t __b, unsigned * __carry_out)
-{
- return __arm_vadciq_s32 (__a, __b, __carry_out);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vadciq (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out)
-{
- return __arm_vadciq_u32 (__a, __b, __carry_out);
-}
-
-__extension__ extern __inline int32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vadciq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
-{
- return __arm_vadciq_m_s32 (__inactive, __a, __b, __carry_out, __p);
-}
-
-__extension__ extern __inline uint32x4_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-__arm_vadciq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
-{
- return __arm_vadciq_m_u32 (__inactive, __a, __b, __carry_out, __p);
-}
-
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 __arm_vadcq (int32x4_t __a, int32x4_t __b, unsigned * __carry)
@@ -5289,12 +5219,6 @@  extern void *__ARM_undef;
 #endif /* MVE Integer.  */
 
 
-#define __arm_vadciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
-
 #define __arm_vstrdq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
   _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
   int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_wb_p_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
@@ -5321,19 +5245,6 @@  extern void *__ARM_undef;
   int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_shifted_offset_z_s64 (__ARM_mve_coerce_s64_ptr(p0, int64_t *), p1, p2), \
   int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_shifted_offset_z_u64 (__ARM_mve_coerce_u64_ptr(p0, uint64_t *), p1, p2)))
 
-#define __arm_vadciq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  __typeof(p2) __p2 = (p2); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));})
-
-#define __arm_vadciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
-  __typeof(p1) __p1 = (p1); \
-  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
-  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
-  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
-
 #define __arm_vadcq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
   __typeof(p1) __p1 = (p1); \
   __typeof(p2) __p2 = (p2); \