@@ -3063,6 +3063,1709 @@ vmv::expand (const function_instance &instance, tree exp, rtx target) const
return expand_builtin_insn (icode, exp, target, instance);
}
+/* Reduction intrinsics (reduceop): the overloaded name is mangled from operand types 0 and 1, then the base name and predication suffix are appended; argument types come from arg-pattern entries 1..N, and the mask type is derived from the first argument's vector mode.  */
+char *
+reduceop::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0, 1);
+ append_name (instance.get_base_name ());
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+void
+reduceop::get_argument_types (const function_instance &instance,
+ vec<tree> &argument_types) const
+{
+ for (unsigned int i = 1; i < instance.get_arg_pattern ().arg_len; i++)
+ argument_types.quick_push (get_dt_t_with_index (instance, i));
+}
+
+tree
+reduceop::get_mask_type (tree, const function_instance &,
+ const vec<tree> &argument_types) const
+{
+ machine_mode mask_mode;
+ gcc_assert (rvv_get_mask_mode (TYPE_MODE (argument_types[0])).exists (&mask_mode));
+ return mode2mask_t (mask_mode);
+}
+
+/* vsadd: saturating signed add.  The vv form maps to the SS_PLUS rtx pattern, the vx form to UNSPEC_VSADD; mode is the call's result mode.  */
+rtx
+vsadd::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_v_vv (SS_PLUS, mode);
+ else
+ icode = code_for_v_vx (UNSPEC_VSADD, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vsaddu: saturating unsigned add.  vv maps to US_PLUS, vx to UNSPEC_VSADDU.  */
+rtx
+vsaddu::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_v_vv (US_PLUS, mode);
+ else
+ icode = code_for_v_vx (UNSPEC_VSADDU, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vaadd: averaging signed add.  vv maps to UNSPEC_AADD, vx to UNSPEC_VAADD.  */
+rtx
+vaadd::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_v_vv (UNSPEC_AADD, mode);
+ else
+ icode = code_for_v_vx (UNSPEC_VAADD, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vaaddu: averaging unsigned add.  vv maps to UNSPEC_AADDU, vx to UNSPEC_VAADDU.  */
+rtx
+vaaddu::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_v_vv (UNSPEC_AADDU, mode);
+ else
+ icode = code_for_v_vx (UNSPEC_VAADDU, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vssub: saturating signed subtract.  vv uses the dedicated vsssub_vv pattern, vx goes through UNSPEC_VSSUB.  */
+rtx
+vssub::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vsssub_vv (mode);
+ else
+ icode = code_for_v_vx (UNSPEC_VSSUB, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vssubu: saturating unsigned subtract.  vv uses the dedicated vussub_vv pattern, vx goes through UNSPEC_VSSUBU.  */
+rtx
+vssubu::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vussub_vv (mode);
+ else
+ icode = code_for_v_vx (UNSPEC_VSSUBU, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vasub: averaging signed subtract.  vv maps to UNSPEC_ASUB, vx to UNSPEC_VASUB.  */
+rtx
+vasub::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_v_vv (UNSPEC_ASUB, mode);
+ else
+ icode = code_for_v_vx (UNSPEC_VASUB, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vasubu: averaging unsigned subtract.  vv maps to UNSPEC_ASUBU, vx to UNSPEC_VASUBU.  */
+rtx
+vasubu::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_v_vv (UNSPEC_ASUBU, mode);
+ else
+ icode = code_for_v_vx (UNSPEC_VASUBU, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vssrl: scaling shift right logical.  Both vv and vx forms are selected through UNSPEC_SSRL.  */
+rtx
+vssrl::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_v_vv (UNSPEC_SSRL, mode);
+ else
+ icode = code_for_v_vx (UNSPEC_SSRL, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vssra: scaling shift right arithmetic.  Both vv and vx forms are selected through UNSPEC_SSRA.  */
+rtx
+vssra::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_v_vv (UNSPEC_SSRA, mode);
+ else
+ icode = code_for_v_vx (UNSPEC_SSRA, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vsmul: vv maps to UNSPEC_SMUL, vx to UNSPEC_VSMUL.  */
+rtx
+vsmul::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_v_vv (UNSPEC_SMUL, mode);
+ else
+ icode = code_for_v_vx (UNSPEC_VSMUL, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vnclip: narrowing signed clip; wv/wx patterns selected with UNSPEC_SIGNED_CLIP on the (narrow) result mode.  */
+rtx
+vnclip::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_wv)
+ icode = code_for_vn_wv (UNSPEC_SIGNED_CLIP, mode);
+ else
+ icode = code_for_vn_wx (UNSPEC_SIGNED_CLIP, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vnclipu: narrowing unsigned clip; wv/wx patterns selected with UNSPEC_UNSIGNED_CLIP.  */
+rtx
+vnclipu::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_wv)
+ icode = code_for_vn_wv (UNSPEC_UNSIGNED_CLIP, mode);
+ else
+ icode = code_for_vn_wx (UNSPEC_UNSIGNED_CLIP, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* Unary FP ops (funop) may raise IEEE FP exceptions; record that in the call properties.  */
+unsigned int
+funop::call_properties () const
+{
+ return CP_RAISE_FP_EXCEPTIONS;
+}
+
+/* Binary FP ops (fbinop) may raise IEEE FP exceptions.  */
+unsigned int
+fbinop::call_properties () const
+{
+ return CP_RAISE_FP_EXCEPTIONS;
+}
+
+/* Widening binary FP ops (fwbinop) may raise IEEE FP exceptions.  */
+unsigned int
+fwbinop::call_properties () const
+{
+ return CP_RAISE_FP_EXCEPTIONS;
+}
+
+/* Ternary FP ops (fternop, the multiply-add family) may raise IEEE FP exceptions.  */
+unsigned int
+fternop::call_properties () const
+{
+ return CP_RAISE_FP_EXCEPTIONS;
+}
+
+/* vfadd: FP add.  vv/vf pattern is selected with the PLUS rtx code on the result mode.  */
+rtx
+vfadd::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vf_vv (PLUS, mode);
+ else
+ icode = code_for_vf_vf (PLUS, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfsub: FP subtract.  vv/vf pattern is selected with MINUS.  */
+rtx
+vfsub::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vf_vv (MINUS, mode);
+ else
+ icode = code_for_vf_vf (MINUS, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfmul: FP multiply.  vv/vf pattern is selected with MULT.  */
+rtx
+vfmul::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vf_vv (MULT, mode);
+ else
+ icode = code_for_vf_vf (MULT, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfdiv: FP divide.  vv/vf pattern is selected with DIV.  */
+rtx
+vfdiv::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vf_vv (DIV, mode);
+ else
+ icode = code_for_vf_vf (DIV, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfrsub: reverse FP subtract; only a vf form exists, via the reversed-operand vfr_vf pattern with MINUS.  */
+rtx
+vfrsub::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vfr_vf (MINUS, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfrdiv: reverse FP divide; vf form only, vfr_vf pattern with DIV.  */
+rtx
+vfrdiv::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vfr_vf (DIV, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfneg: FP negate; single vfneg_v pattern on the result mode.  */
+rtx
+vfneg::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vfneg_v (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfwadd: widening FP add.  Mode is taken from arg-pattern entry 2; one of the four vv/vf/wv/wf patterns is chosen by the operation suffix.  */
+rtx
+vfwadd::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[2];
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vfw_vv (PLUS, mode);
+ else if (instance.get_operation () == OP_vf)
+ icode = code_for_vfw_vf (PLUS, mode);
+ else if (instance.get_operation () == OP_wv)
+ icode = code_for_vfw_wv (PLUS, mode);
+ else
+ icode = code_for_vfw_wf (PLUS, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfwsub: widening FP subtract; same four-way pattern selection as vfwadd, with MINUS.  */
+rtx
+vfwsub::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[2];
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vfw_vv (MINUS, mode);
+ else if (instance.get_operation () == OP_vf)
+ icode = code_for_vfw_vf (MINUS, mode);
+ else if (instance.get_operation () == OP_wv)
+ icode = code_for_vfw_wv (MINUS, mode);
+ else
+ icode = code_for_vfw_wf (MINUS, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfwmul: widening FP multiply.  NOTE(review): mode comes from arg-pattern entry 1, unlike vfwadd/vfwsub which use entry 2 — confirm against the vfwmul_vv pattern's mode attribute.  */
+rtx
+vfwmul::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vfwmul_vv (mode);
+ else
+ icode = code_for_vfwmul_vf (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfmacc: select the UNSPEC_MACC vv or vf pattern on the result mode.  */
+rtx
+vfmacc::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vf_vv (UNSPEC_MACC, mode);
+ else
+ icode = code_for_vf_vf (UNSPEC_MACC, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfmsac: select the UNSPEC_MSAC vv or vf pattern.  */
+rtx
+vfmsac::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vf_vv (UNSPEC_MSAC, mode);
+ else
+ icode = code_for_vf_vf (UNSPEC_MSAC, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfnmacc: select the UNSPEC_NMACC vv or vf pattern.  */
+rtx
+vfnmacc::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vf_vv (UNSPEC_NMACC, mode);
+ else
+ icode = code_for_vf_vf (UNSPEC_NMACC, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfnmsac: select the UNSPEC_NMSAC vv or vf pattern.  */
+rtx
+vfnmsac::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vf_vv (UNSPEC_NMSAC, mode);
+ else
+ icode = code_for_vf_vf (UNSPEC_NMSAC, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfmadd: select the UNSPEC_MADD vv or vf pattern.  */
+rtx
+vfmadd::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vf_vv (UNSPEC_MADD, mode);
+ else
+ icode = code_for_vf_vf (UNSPEC_MADD, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfnmadd: select the UNSPEC_NMADD vv or vf pattern.  */
+rtx
+vfnmadd::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vf_vv (UNSPEC_NMADD, mode);
+ else
+ icode = code_for_vf_vf (UNSPEC_NMADD, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfmsub: select the UNSPEC_MSUB vv or vf pattern.  */
+rtx
+vfmsub::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vf_vv (UNSPEC_MSUB, mode);
+ else
+ icode = code_for_vf_vf (UNSPEC_MSUB, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfnmsub: select the UNSPEC_NMSUB vv or vf pattern.  */
+rtx
+vfnmsub::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vf_vv (UNSPEC_NMSUB, mode);
+ else
+ icode = code_for_vf_vf (UNSPEC_NMSUB, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfwmacc: widening multiply-accumulate; mode from arg-pattern entry 2, dedicated vfwmacc_vv/_vf patterns.  */
+rtx
+vfwmacc::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[2];
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vfwmacc_vv (mode);
+ else
+ icode = code_for_vfwmacc_vf (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfwnmacc: widening negated multiply-accumulate; same mode selection as vfwmacc.  */
+rtx
+vfwnmacc::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[2];
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vfwnmacc_vv (mode);
+ else
+ icode = code_for_vfwnmacc_vf (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfwmsac: widening multiply-subtract-accumulate; same mode selection as vfwmacc.  */
+rtx
+vfwmsac::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[2];
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vfwmsac_vv (mode);
+ else
+ icode = code_for_vfwmsac_vf (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfwnmsac: widening negated multiply-subtract-accumulate; same mode selection as vfwmacc.  */
+rtx
+vfwnmsac::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[2];
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vfwnmsac_vv (mode);
+ else
+ icode = code_for_vfwnmsac_vf (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfsqrt: FP square root; single vfsqrt_v pattern on the result mode.  */
+rtx
+vfsqrt::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vfsqrt_v (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfrsqrt7: reciprocal square-root estimate, via the UNSPEC_RSQRT7 unary pattern.  */
+rtx
+vfrsqrt7::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vf_v (UNSPEC_RSQRT7, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfrec7: reciprocal estimate, via the UNSPEC_REC7 unary pattern.  */
+rtx
+vfrec7::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vf_v (UNSPEC_REC7, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfmax: FP maximum; vv/vf pattern selected with the SMAX rtx code.  */
+rtx
+vfmax::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vf_vv (SMAX, mode);
+ else
+ icode = code_for_vf_vf (SMAX, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfmin: FP minimum; vv/vf pattern selected with SMIN.  */
+rtx
+vfmin::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vf_vv (SMIN, mode);
+ else
+ icode = code_for_vf_vf (SMIN, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfsgnj: sign-inject (copysign); vv/vf pattern selected with UNSPEC_COPYSIGN.  */
+rtx
+vfsgnj::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vfsgnj_vv (UNSPEC_COPYSIGN, mode);
+ else
+ icode = code_for_vfsgnj_vf (UNSPEC_COPYSIGN, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfsgnjn: negated sign-inject; selected with UNSPEC_NCOPYSIGN.  */
+rtx
+vfsgnjn::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vfsgnj_vv (UNSPEC_NCOPYSIGN, mode);
+ else
+ icode = code_for_vfsgnj_vf (UNSPEC_NCOPYSIGN, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfsgnjx: xor sign-inject; selected with UNSPEC_XORSIGN.  */
+rtx
+vfsgnjx::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vfsgnj_vv (UNSPEC_XORSIGN, mode);
+ else
+ icode = code_for_vfsgnj_vf (UNSPEC_XORSIGN, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfabs: FP absolute value; single vfabs_v pattern.  */
+rtx
+vfabs::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vfabs_v (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* FP comparisons (vfcmp): mangle the overloaded name from operand types 0 and 1, then append the base name and predication suffix.  */
+char *
+vfcmp::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0, 1);
+ append_name (instance.get_base_name ());
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+/* vmfeq: FP compare equal.  Mode is taken from argument 1 (the vector operand) rather than the result; vv/vf pattern selected with the EQ rtx code.  */
+rtx
+vmfeq::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vmf_vv (EQ, mode);
+ else
+ icode = code_for_vmf_vf (EQ, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vmfne: FP compare not-equal; selected with NE.  */
+rtx
+vmfne::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vmf_vv (NE, mode);
+ else
+ icode = code_for_vmf_vf (NE, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vmflt: FP compare less-than; selected with LT.  */
+rtx
+vmflt::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vmf_vv (LT, mode);
+ else
+ icode = code_for_vmf_vf (LT, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vmfgt: FP compare greater-than; selected with GT.  */
+rtx
+vmfgt::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vmf_vv (GT, mode);
+ else
+ icode = code_for_vmf_vf (GT, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vmfle: FP compare less-or-equal; selected with LE.  */
+rtx
+vmfle::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vmf_vv (LE, mode);
+ else
+ icode = code_for_vmf_vf (LE, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vmfge: FP compare greater-or-equal; selected with GE.  */
+rtx
+vmfge::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vmf_vv (GE, mode);
+ else
+ icode = code_for_vmf_vf (GE, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfclass: classify FP values; vfclass_v pattern on the mode of argument 1 (the FP input), since the result is an integer vector.  */
+rtx
+vfclass::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vfclass_v (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfmerge: the destination (merged-into) operand sits at argument position 1 regardless of predication; expansion always uses the vfmerge_vfm pattern.  */
+size_t
+vfmerge::get_position_of_dest_arg (enum predication_index) const
+{
+ return 1;
+}
+
+rtx
+vfmerge::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vfmerge_vfm (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfmv: scalar-to-vector move.  Only the tail-undisturbed (PRED_tu) form is overloadable; expansion picks vfmv_v_f for the scalar (v_f) operation and falls back to vmv_v_v otherwise.  */
+bool
+vfmv::can_be_overloaded_p (const function_instance &instance) const
+{
+ if (instance.get_pred () == PRED_tu)
+ return true;
+
+ return false;
+}
+
+rtx
+vfmv::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_v_f)
+ icode = code_for_vfmv_v_f (mode);
+ else
+ icode = code_for_vmv_v_v (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfcvt_x_f_v (float -> signed int): overload mangles from operand 0, forces the base name to "vfcvt_x" plus the predication suffix.  */
+char *
+vfcvt_f2i::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ append_name ("vfcvt_x");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vfcvt_f2i::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vfcvt_x_f_v (mode, UNSPEC_FLOAT_TO_SIGNED_INT);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfcvt_xu_f_v (float -> unsigned int): same scheme, overload name "vfcvt_xu".  */
+char *
+vfcvt_f2u::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ append_name ("vfcvt_xu");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vfcvt_f2u::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vfcvt_x_f_v (mode, UNSPEC_FLOAT_TO_UNSIGNED_INT);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfcvt_rtz_x_f_v (round-towards-zero float -> signed int): uses the rtx FIX code.  */
+char *
+vfcvt_rtz_f2i::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ append_name ("vfcvt_rtz_x");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vfcvt_rtz_f2i::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vfcvt_rtz_x_f_v (mode, FIX);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfcvt_rtz_xu_f_v (round-towards-zero float -> unsigned int): uses UNSIGNED_FIX.  */
+char *
+vfcvt_rtz_f2u::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ append_name ("vfcvt_rtz_xu");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vfcvt_rtz_f2u::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vfcvt_rtz_x_f_v (mode, UNSIGNED_FIX);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfcvt_f_x_v (signed int -> float): overload name "vfcvt_f"; NOTE(review): mode is taken from arg-pattern entry 0, unlike the f2i direction which uses entry 1 — confirm which entry carries the integer mode.  */
+char *
+vfcvt_i2f::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ append_name ("vfcvt_f");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vfcvt_i2f::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[0];
+ enum insn_code icode = code_for_vfcvt_f_x_v (mode, FLOAT);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfcvt_f_xu_v (unsigned int -> float): shares the "vfcvt_f" overload name with the signed variant; uses UNSIGNED_FLOAT.  */
+char *
+vfcvt_u2f::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ append_name ("vfcvt_f");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vfcvt_u2f::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[0];
+ enum insn_code icode = code_for_vfcvt_f_x_v (mode, UNSIGNED_FLOAT);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfwcvt_x_f_v (widening float -> signed int): overload mangles from operand 0, forced base name "vfwcvt_x" plus predication suffix.  */
+char *
+vfwcvt_f2i::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ append_name ("vfwcvt_x");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vfwcvt_f2i::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vfwcvt_x_f_v (mode, UNSPEC_FLOAT_TO_SIGNED_INT);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfwcvt_xu_f_v (widening float -> unsigned int): overload name "vfwcvt_xu".  */
+char *
+vfwcvt_f2u::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ append_name ("vfwcvt_xu");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vfwcvt_f2u::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vfwcvt_x_f_v (mode, UNSPEC_FLOAT_TO_UNSIGNED_INT);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfwcvt_rtz_x_f_v (widening round-towards-zero float -> signed int): rtx FIX.  */
+char *
+vfwcvt_rtz_f2i::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ append_name ("vfwcvt_rtz_x");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vfwcvt_rtz_f2i::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vfwcvt_rtz_x_f_v (mode, FIX);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfwcvt_rtz_xu_f_v (widening round-towards-zero float -> unsigned int): UNSIGNED_FIX.  */
+char *
+vfwcvt_rtz_f2u::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ append_name ("vfwcvt_rtz_xu");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vfwcvt_rtz_f2u::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vfwcvt_rtz_x_f_v (mode, UNSIGNED_FIX);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfwcvt_f_x_v (widening signed int -> float): overload name "vfwcvt_f"; rtx FLOAT.  */
+char *
+vfwcvt_i2f::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ append_name ("vfwcvt_f");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vfwcvt_i2f::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vfwcvt_f_x_v (mode, FLOAT);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfwcvt_f_xu_v (widening unsigned int -> float): shares the "vfwcvt_f" overload name; UNSIGNED_FLOAT.  */
+char *
+vfwcvt_u2f::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ append_name ("vfwcvt_f");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vfwcvt_u2f::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vfwcvt_f_x_v (mode, UNSIGNED_FLOAT);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfwcvt_f_f_v (widening float -> wider float): also overloads as "vfwcvt_f"; dedicated vfwcvt_f_f_v pattern.  */
+char *
+vfwcvt_f2f::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ append_name ("vfwcvt_f");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vfwcvt_f2f::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vfwcvt_f_f_v (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfncvt_x_f_w (narrowing float -> signed int): overload mangles from operand 0, forced base name "vfncvt_x"; NOTE(review): the narrowing converts take the mode from arg-pattern entry 0, unlike the vfwcvt family which uses entry 1 — confirm against the vfncvt_x_f_w pattern.  */
+char *
+vfncvt_f2i::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ append_name ("vfncvt_x");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vfncvt_f2i::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[0];
+ enum insn_code icode = code_for_vfncvt_x_f_w (mode, UNSPEC_FLOAT_TO_SIGNED_INT);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfncvt_xu_f_w (narrowing float -> unsigned int): overload name "vfncvt_xu".  */
+char *
+vfncvt_f2u::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ append_name ("vfncvt_xu");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vfncvt_f2u::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[0];
+ enum insn_code icode = code_for_vfncvt_x_f_w (mode, UNSPEC_FLOAT_TO_UNSIGNED_INT);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfncvt_rtz_x_f_w (narrowing round-towards-zero float -> signed int): rtx FIX.  */
+char *
+vfncvt_rtz_f2i::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ append_name ("vfncvt_rtz_x");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vfncvt_rtz_f2i::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[0];
+ enum insn_code icode = code_for_vfncvt_rtz_x_f_w (mode, FIX);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* vfncvt_rtz_xu_f_w (narrowing round-towards-zero float -> unsigned int): UNSIGNED_FIX.  */
+char *
+vfncvt_rtz_f2u::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ append_name ("vfncvt_rtz_xu");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vfncvt_rtz_f2u::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[0];
+ enum insn_code icode = code_for_vfncvt_rtz_x_f_w (mode, UNSIGNED_FIX);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vfncvt_f_x_w functions. */
+char *
+vfncvt_i2f::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ append_name ("vfncvt_f");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vfncvt_i2f::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[0];
+ enum insn_code icode = code_for_vfncvt_f_x_w (mode, FLOAT);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vfncvt_f_xu_w functions. */
+char *
+vfncvt_u2f::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ append_name ("vfncvt_f");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vfncvt_u2f::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[0];
+ enum insn_code icode = code_for_vfncvt_f_x_w (mode, UNSIGNED_FLOAT);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vfncvt_f_f_w functions. */
+char *
+vfncvt_f2f::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ append_name ("vfncvt_f");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vfncvt_f2f::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vfncvt_f_f_w (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vfncvt_rod_f_f_w functions. */
+char *
+vfncvt_f2rodf::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ append_name ("vfncvt_rod_f");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vfncvt_f2rodf::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vfncvt_rod_f_f_w (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vredsum functions (integer sum reduction). */
+rtx
+vredsum::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vred_vs (UNSPEC_REDUC_SUM, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vredmax functions (signed max reduction). */
+rtx
+vredmax::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vred_vs (UNSPEC_REDUC_MAX, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vredmaxu functions (unsigned max reduction). */
+rtx
+vredmaxu::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vred_vs (UNSPEC_REDUC_MAXU, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vredmin functions (signed min reduction). */
+rtx
+vredmin::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vred_vs (UNSPEC_REDUC_MIN, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vredminu functions (unsigned min reduction). */
+rtx
+vredminu::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vred_vs (UNSPEC_REDUC_MINU, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vredand functions (bitwise AND reduction). */
+rtx
+vredand::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vred_vs (UNSPEC_REDUC_AND, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vredor functions (bitwise OR reduction). */
+rtx
+vredor::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vred_vs (UNSPEC_REDUC_OR, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vredxor functions (bitwise XOR reduction). */
+rtx
+vredxor::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vred_vs (UNSPEC_REDUC_XOR, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vwredsum functions (widening signed sum reduction). */
+rtx
+vwredsum::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vwredsum_vs (SIGN_EXTEND, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vwredsumu functions (widening unsigned sum reduction). */
+rtx
+vwredsumu::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vwredsum_vs (ZERO_EXTEND, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* Shared property of all floating-point reductions: they may raise FP exceptions. */
+unsigned int
+freduceop::call_properties () const
+{
+ return CP_RAISE_FP_EXCEPTIONS;
+}
+
+/* A function implementation for vfredosum functions (ordered FP sum reduction). */
+rtx
+vfredosum::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vfred_vs (UNSPEC_REDUC_ORDERED_SUM, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vfredusum functions (unordered FP sum reduction). */
+rtx
+vfredusum::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vfred_vs (UNSPEC_REDUC_UNORDERED_SUM, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vfredmax functions (FP max reduction). */
+rtx
+vfredmax::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vfred_vs (UNSPEC_REDUC_MAX, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vfredmin functions (FP min reduction). */
+rtx
+vfredmin::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vfred_vs (UNSPEC_REDUC_MIN, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vfwredosum functions (widening ordered FP sum reduction). */
+rtx
+vfwredosum::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vfwredosum_vs (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vfwredusum functions (widening unordered FP sum reduction). */
+rtx
+vfwredusum::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vfwredusum_vs (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmand functions (mask AND). */
+rtx
+vmand::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vm_mm (AND, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmor functions (mask OR). */
+rtx
+vmor::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vm_mm (IOR, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmxor functions (mask XOR). */
+rtx
+vmxor::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vm_mm (XOR, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmnand functions (mask NAND). */
+rtx
+vmnand::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vmn_mm (AND, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmnor functions (mask NOR). */
+rtx
+vmnor::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vmn_mm (IOR, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmxnor functions (mask XNOR). */
+rtx
+vmxnor::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vmn_mm (XOR, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmandn functions (mask AND-NOT). */
+rtx
+vmandn::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vmnot_mm (AND, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmorn functions (mask OR-NOT). */
+rtx
+vmorn::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vmnot_mm (IOR, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmmv functions (mask copy). */
+rtx
+vmmv::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vmmv_m (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmnot functions (mask complement). */
+rtx
+vmnot::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vmnot_m (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmclr functions: takes no arguments, never overloaded. */
+void
+vmclr::get_argument_types (const function_instance &,
+ vec<tree> &) const
+{
+}
+
+bool
+vmclr::can_be_overloaded_p (const function_instance &) const
+{
+ return false;
+}
+
+rtx
+vmclr::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vmclr_m (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmset functions: takes no arguments, never overloaded. */
+void
+vmset::get_argument_types (const function_instance &,
+ vec<tree> &) const
+{
+}
+
+bool
+vmset::can_be_overloaded_p (const function_instance &) const
+{
+ return false;
+}
+
+rtx
+vmset::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vmset_m (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vcpop functions; result is unsigned long. */
+tree
+vcpop::get_return_type (const function_instance &) const
+{
+ return long_unsigned_type_node;
+}
+
+rtx
+vcpop::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[0];
+ enum insn_code icode = code_for_vcpop_m (mode, Pmode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vfirst functions; result is signed long. */
+tree
+vfirst::get_return_type (const function_instance &) const
+{
+ return long_integer_type_node;
+}
+
+rtx
+vfirst::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[0];
+ enum insn_code icode = code_for_vfirst_m (mode, Pmode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmsbf functions (set-before-first mask bit). */
+rtx
+vmsbf::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vm_m (UNSPEC_SBF, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmsif functions (set-including-first mask bit). */
+rtx
+vmsif::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vm_m (UNSPEC_SIF, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmsof functions (set-only-first mask bit). */
+rtx
+vmsof::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vm_m (UNSPEC_SOF, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for viota functions; no overload for void/ta/tama predication. */
+bool
+viota::can_be_overloaded_p (const function_instance &instance) const
+{
+ if (instance.get_pred () == PRED_void || instance.get_pred () == PRED_ta ||
+ instance.get_pred () == PRED_tama)
+ return false;
+
+ return true;
+}
+
+rtx
+viota::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_viota_m (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vid functions; no arguments, no overload for void/ta/tama. */
+void
+vid::get_argument_types (const function_instance &,
+ vec<tree> &) const
+{
+}
+
+bool
+vid::can_be_overloaded_p (const function_instance &instance) const
+{
+ if (instance.get_pred () == PRED_void || instance.get_pred () == PRED_ta ||
+ instance.get_pred () == PRED_tama)
+ return false;
+
+ return true;
+}
+
+rtx
+vid::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vid_v (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmv_x_s functions (vector element 0 -> x-register). */
+char *
+vmv_x_s::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0, 1);
+ append_name ("vmv_x");
+ return finish_name ();
+}
+
+rtx
+vmv_x_s::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vmv_x_s (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmv_s_x functions; plain PRED_ta gets no name. */
+char *
+vmv_s_x::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ if (instance.get_pred () == PRED_ta)
+ return nullptr;
+ append_name ("vmv_s");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vmv_s_x::expand (const function_instance &instance, tree exp, rtx target) const
+{
+
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_v_s_x (UNSPEC_VMVS, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vfmv_f_s functions (vector element 0 -> f-register). */
+char *
+vfmv_f_s::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0, 1);
+ append_name ("vfmv_f");
+ return finish_name ();
+}
+
+rtx
+vfmv_f_s::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+ enum insn_code icode = code_for_vfmv_f_s (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vfmv_s_f functions; plain PRED_ta gets no name. */
+char *
+vfmv_s_f::assemble_name (function_instance &instance)
+{
+ intrinsic_rename (instance, 0);
+ if (instance.get_pred () == PRED_ta)
+ return nullptr;
+ append_name ("vfmv_s");
+ append_name (get_pred_str (instance.get_pred (), true));
+ return finish_name ();
+}
+
+rtx
+vfmv_s_f::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vfmv_s_f (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vslideup functions (slide elements up by offset). */
+rtx
+vslideup::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vslide_vx (UNSPEC_SLIDEUP, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vslidedown functions (slide elements down by offset). */
+rtx
+vslidedown::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vslide_vx (UNSPEC_SLIDEDOWN, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vslide1up functions (slide up, insert scalar). */
+rtx
+vslide1up::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vslide1_vx (UNSPEC_SLIDE1UP, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vslide1down functions (slide down, insert scalar). */
+rtx
+vslide1down::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vslide1_vx (UNSPEC_SLIDE1DOWN, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vfslide1up functions (slide up, insert FP scalar). */
+rtx
+vfslide1up::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vfslide1_vf (UNSPEC_SLIDE1UP, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vfslide1down functions (slide down, insert FP scalar). */
+rtx
+vfslide1down::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vfslide1_vf (UNSPEC_SLIDE1DOWN, mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vrgather functions (vv or vx index form). */
+rtx
+vrgather::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode;
+ if (instance.get_operation () == OP_vv)
+ icode = code_for_vrgather_vv (mode);
+ else
+ icode = code_for_vrgather_vx (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vrgatherei16 functions (16-bit index vector). */
+rtx
+vrgatherei16::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vrgatherei16_vv (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vcompress functions; dest operand is argument 1. */
+size_t
+vcompress::get_position_of_dest_arg (enum predication_index) const
+{
+ return 1;
+}
+
+rtx
+vcompress::expand (const function_instance &instance, tree exp,
+ rtx target) const
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ enum insn_code icode = code_for_vcompress_vm (mode);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
} // end namespace riscv_vector
using namespace riscv_vector;
@@ -237,6 +237,154 @@ DEF_RVV_FUNCTION(vmerge, vmerge, (3, VITER(VF, signed), VATTR(0, VF, signed), VA
DEF_RVV_FUNCTION(vmv, vmv, (2, VITER(VI, signed), VATTR(0, VSUB, signed)), PAT_tail, pred_tail, OP_v_v | OP_v_x)
DEF_RVV_FUNCTION(vmv, vmv, (2, VITER(VI, unsigned), VATTR(0, VSUB, unsigned)), PAT_tail, pred_tail, OP_v_v | OP_v_x)
DEF_RVV_FUNCTION(vmv, vmv, (2, VITER(VF, signed), VATTR(0, VSUB, signed)), PAT_tail, pred_tail, OP_v_v)
+/* 12. Vector Fixed-Point Arithmetic Instructions. */
+DEF_RVV_FUNCTION(vsadd, vsadd, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vsaddu, vsaddu, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vaadd, vaadd, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vaaddu, vaaddu, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vssub, vssub, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vssubu, vssubu, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vasub, vasub, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vasubu, vasubu, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vsmul, vsmul, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vssrl, vssrl, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vssra, vssra, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vnclip, vnclip, (3, VITER(VWI, signed), VATTR(0, VW, signed), VATTR(0, VWI, unsigned)), pat_mask_tail, pred_all, OP_wv | OP_wx)
+DEF_RVV_FUNCTION(vnclipu, vnclipu, (3, VITER(VWI, unsigned), VATTR(0, VW, unsigned), VATTR(0, VWI, unsigned)), pat_mask_tail, pred_all, OP_wv | OP_wx)
+/* 13. Vector Floating-Point Arithmetic Instructions. */
+DEF_RVV_FUNCTION(vfadd, vfadd, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VF, signed)), pat_mask_tail, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfsub, vfsub, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VF, signed)), pat_mask_tail, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfmul, vfmul, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VF, signed)), pat_mask_tail, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfdiv, vfdiv, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VF, signed)), pat_mask_tail, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfrsub, vfrsub, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VF, signed)), pat_mask_tail, pred_all, OP_vf)
+DEF_RVV_FUNCTION(vfrdiv, vfrdiv, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VF, signed)), pat_mask_tail, pred_all, OP_vf)
+DEF_RVV_FUNCTION(vfneg, vfneg, (2, VITER(VF, signed), VATTR(0, VF, signed)), pat_mask_tail, pred_all, OP_v)
+DEF_RVV_FUNCTION(vfwadd, vfwadd, (3, VATTR(1, VW, signed), VITER(VWF, signed), VATTR(1, VWF, signed)), pat_mask_tail, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfwsub, vfwsub, (3, VATTR(1, VW, signed), VITER(VWF, signed), VATTR(1, VWF, signed)), pat_mask_tail, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfwadd, vfwadd, (3, VATTR(2, VW, signed), VATTR(2, VW, signed), VITER(VWF, signed)), pat_mask_tail, pred_all, OP_wv | OP_wf)
+DEF_RVV_FUNCTION(vfwsub, vfwsub, (3, VATTR(2, VW, signed), VATTR(2, VW, signed), VITER(VWF, signed)), pat_mask_tail, pred_all, OP_wv | OP_wf)
+DEF_RVV_FUNCTION(vfwmul, vfwmul, (3, VATTR(1, VW, signed), VITER(VWF, signed), VATTR(1, VWF, signed)), pat_mask_tail, pred_all, OP_vv | OP_vf | OP_wv | OP_wf)
+DEF_RVV_FUNCTION(vfmacc, vfmacc, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VF, signed)), pat_mask_tail_dest, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfmsac, vfmsac, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VF, signed)), pat_mask_tail_dest, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfnmacc, vfnmacc, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VF, signed)), pat_mask_tail_dest, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfnmsac, vfnmsac, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VF, signed)), pat_mask_tail_dest, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfmadd, vfmadd, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VF, signed)), pat_mask_tail_dest, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfnmadd, vfnmadd, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VF, signed)), pat_mask_tail_dest, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfmsub, vfmsub, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VF, signed)), pat_mask_tail_dest, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfnmsub, vfnmsub, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VF, signed)), pat_mask_tail_dest, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfwmacc, vfwmacc, (3, VATTR(1, VW, signed), VITER(VWF, signed), VATTR(1, VWF, signed)), pat_mask_tail_dest, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfwmsac, vfwmsac, (3, VATTR(1, VW, signed), VITER(VWF, signed), VATTR(1, VWF, signed)), pat_mask_tail_dest, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfwnmacc, vfwnmacc, (3, VATTR(1, VW, signed), VITER(VWF, signed), VATTR(1, VWF, signed)), pat_mask_tail_dest, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfwnmsac, vfwnmsac, (3, VATTR(1, VW, signed), VITER(VWF, signed), VATTR(1, VWF, signed)), pat_mask_tail_dest, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfsqrt, vfsqrt, (2, VITER(VF, signed), VATTR(0, VF, signed)), pat_mask_tail, pred_all, OP_v)
+DEF_RVV_FUNCTION(vfrsqrt7, vfrsqrt7, (2, VITER(VF, signed), VATTR(0, VF, signed)), pat_mask_tail, pred_all, OP_v)
+DEF_RVV_FUNCTION(vfrec7, vfrec7, (2, VITER(VF, signed), VATTR(0, VF, signed)), pat_mask_tail, pred_all, OP_v)
+DEF_RVV_FUNCTION(vfmax, vfmax, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VF, signed)), pat_mask_tail, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfmin, vfmin, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VF, signed)), pat_mask_tail, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfsgnj, vfsgnj, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VF, signed)), pat_mask_tail, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfsgnjn, vfsgnjn, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VF, signed)), pat_mask_tail, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfsgnjx, vfsgnjx, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VF, signed)), pat_mask_tail, pred_all, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfabs, vfabs, (2, VITER(VF, signed), VATTR(0, VF, signed)), pat_mask_tail, pred_all, OP_v)
+DEF_RVV_FUNCTION(vmfeq, vmfeq, (3, VATTR(1, VM, signed), VITER(VF, signed), VATTR(1, VF, signed)), pat_mask_ignore_tp, pred_mask, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vmfne, vmfne, (3, VATTR(1, VM, signed), VITER(VF, signed), VATTR(1, VF, signed)), pat_mask_ignore_tp, pred_mask, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vmflt, vmflt, (3, VATTR(1, VM, signed), VITER(VF, signed), VATTR(1, VF, signed)), pat_mask_ignore_tp, pred_mask, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vmfle, vmfle, (3, VATTR(1, VM, signed), VITER(VF, signed), VATTR(1, VF, signed)), pat_mask_ignore_tp, pred_mask, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vmfgt, vmfgt, (3, VATTR(1, VM, signed), VITER(VF, signed), VATTR(1, VF, signed)), pat_mask_ignore_tp, pred_mask, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vmfge, vmfge, (3, VATTR(1, VM, signed), VITER(VF, signed), VATTR(1, VF, signed)), pat_mask_ignore_tp, pred_mask, OP_vv | OP_vf)
+DEF_RVV_FUNCTION(vfclass, vfclass, (2, VATTR(1, VMAP, unsigned), VITER(VF, signed)), pat_mask_tail, pred_all, OP_v)
+DEF_RVV_FUNCTION(vfmerge, vfmerge, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VF, signed)), PAT_tail | PAT_merge, pred_tail, OP_vfm)
+DEF_RVV_FUNCTION(vfmv, vfmv, (2, VITER(VF, signed), VATTR(0, VSUB, signed)), PAT_tail, pred_tail, OP_v_f)
+DEF_RVV_FUNCTION(vfcvt_x_f_v, vfcvt_f2i, (2, VATTR(1, VMAP, signed), VITER(VF, signed)), pat_mask_tail, pred_all, OP_none)
+DEF_RVV_FUNCTION(vfcvt_xu_f_v, vfcvt_f2u, (2, VATTR(1, VMAP, unsigned), VITER(VF, signed)), pat_mask_tail, pred_all, OP_none)
+DEF_RVV_FUNCTION(vfcvt_rtz_x_f_v, vfcvt_rtz_f2i, (2, VATTR(1, VMAP, signed), VITER(VF, signed)), pat_mask_tail, pred_all, OP_none)
+DEF_RVV_FUNCTION(vfcvt_rtz_xu_f_v, vfcvt_rtz_f2u, (2, VATTR(1, VMAP, unsigned), VITER(VF, signed)), pat_mask_tail, pred_all, OP_none)
+DEF_RVV_FUNCTION(vfcvt_f_x_v, vfcvt_i2f, (2, VITER(VF, signed), VATTR(0, VMAP, signed)), pat_mask_tail, pred_all, OP_none)
+DEF_RVV_FUNCTION(vfcvt_f_xu_v, vfcvt_u2f, (2, VITER(VF, signed), VATTR(0, VMAP, unsigned)), pat_mask_tail, pred_all, OP_none)
+DEF_RVV_FUNCTION(vfwcvt_x_f_v, vfwcvt_f2i, (2, VATTR(1, VWMAP, signed), VITER(VWF, signed)), pat_mask_tail, pred_all, OP_none)
+DEF_RVV_FUNCTION(vfwcvt_xu_f_v, vfwcvt_f2u, (2, VATTR(1, VWMAP, unsigned), VITER(VWF, signed)), pat_mask_tail, pred_all, OP_none)
+DEF_RVV_FUNCTION(vfwcvt_rtz_x_f_v, vfwcvt_rtz_f2i, (2, VATTR(1, VWMAP, signed), VITER(VWF, signed)), pat_mask_tail, pred_all, OP_none)
+DEF_RVV_FUNCTION(vfwcvt_rtz_xu_f_v, vfwcvt_rtz_f2u, (2, VATTR(1, VWMAP, unsigned), VITER(VWF, signed)), pat_mask_tail, pred_all, OP_none)
+DEF_RVV_FUNCTION(vfwcvt_f_x_v, vfwcvt_i2f, (2, VATTR(1, VWFMAP, signed), VITER(VWINOQI, signed)), pat_mask_tail, pred_all, OP_none)
+DEF_RVV_FUNCTION(vfwcvt_f_xu_v, vfwcvt_u2f, (2, VATTR(1, VWFMAP, signed), VITER(VWINOQI, unsigned)), pat_mask_tail, pred_all, OP_none)
+DEF_RVV_FUNCTION(vfwcvt_f_f_v, vfwcvt_f2f, (2, VATTR(1, VW, signed), VITER(VWF, signed)), pat_mask_tail, pred_all, OP_none)
+DEF_RVV_FUNCTION(vfncvt_x_f_w, vfncvt_f2i, (2, VITER(VWINOQI, signed), VATTR(0, VWFMAP, signed)), pat_mask_tail, pred_all, OP_none)
+DEF_RVV_FUNCTION(vfncvt_xu_f_w, vfncvt_f2u, (2, VITER(VWINOQI, unsigned), VATTR(0, VWFMAP, signed)), pat_mask_tail, pred_all, OP_none)
+DEF_RVV_FUNCTION(vfncvt_rtz_x_f_w, vfncvt_rtz_f2i, (2, VITER(VWINOQI, signed), VATTR(0, VWFMAP, signed)), pat_mask_tail, pred_all, OP_none)
+DEF_RVV_FUNCTION(vfncvt_rtz_xu_f_w, vfncvt_rtz_f2u, (2, VITER(VWINOQI, unsigned), VATTR(0, VWFMAP, signed)), pat_mask_tail, pred_all, OP_none)
+DEF_RVV_FUNCTION(vfncvt_f_x_w, vfncvt_i2f, (2, VITER(VWF, signed), VATTR(0, VWMAP, signed)), pat_mask_tail, pred_all, OP_none)
+DEF_RVV_FUNCTION(vfncvt_f_xu_w, vfncvt_u2f, (2, VITER(VWF, signed), VATTR(0, VWMAP, unsigned)), pat_mask_tail, pred_all, OP_none)
+DEF_RVV_FUNCTION(vfncvt_f_f_w, vfncvt_f2f, (2, VITER(VWF, signed), VATTR(0, VW, signed)), pat_mask_tail, pred_all, OP_none)
+DEF_RVV_FUNCTION(vfncvt_rod_f_f_w, vfncvt_f2rodf, (2, VITER(VWF, signed), VATTR(0, VW, signed)), pat_mask_tail, pred_all, OP_none)
+/* 14. Vector Reduction Operations. */
+DEF_RVV_FUNCTION(vredsum, vredsum, (3, VATTR(1, VLMUL1, signed), VITER(VI, signed), VATTR(1, VLMUL1, signed)), pat_void_dest_ignore_mp, pred_reduce, OP_vs)
+DEF_RVV_FUNCTION(vredsum, vredsum, (3, VATTR(1, VLMUL1, unsigned), VITER(VI, unsigned), VATTR(1, VLMUL1, unsigned)), pat_void_dest_ignore_mp, pred_reduce, OP_vs)
+DEF_RVV_FUNCTION(vredmax, vredmax, (3, VATTR(1, VLMUL1, signed), VITER(VI, signed), VATTR(1, VLMUL1, signed)), pat_void_dest_ignore_mp, pred_reduce, OP_vs)
+DEF_RVV_FUNCTION(vredmaxu, vredmaxu, (3, VATTR(1, VLMUL1, unsigned), VITER(VI, unsigned), VATTR(1, VLMUL1, unsigned)), pat_void_dest_ignore_mp, pred_reduce, OP_vs)
+DEF_RVV_FUNCTION(vredmin, vredmin, (3, VATTR(1, VLMUL1, signed), VITER(VI, signed), VATTR(1, VLMUL1, signed)), pat_void_dest_ignore_mp, pred_reduce, OP_vs)
+DEF_RVV_FUNCTION(vredminu, vredminu, (3, VATTR(1, VLMUL1, unsigned), VITER(VI, unsigned), VATTR(1, VLMUL1, unsigned)), pat_void_dest_ignore_mp, pred_reduce, OP_vs)
+DEF_RVV_FUNCTION(vredand, vredand, (3, VATTR(1, VLMUL1, signed), VITER(VI, signed), VATTR(1, VLMUL1, signed)), pat_void_dest_ignore_mp, pred_reduce, OP_vs)
+DEF_RVV_FUNCTION(vredand, vredand, (3, VATTR(1, VLMUL1, unsigned), VITER(VI, unsigned), VATTR(1, VLMUL1, unsigned)), pat_void_dest_ignore_mp, pred_reduce, OP_vs)
+DEF_RVV_FUNCTION(vredor, vredor, (3, VATTR(1, VLMUL1, signed), VITER(VI, signed), VATTR(1, VLMUL1, signed)), pat_void_dest_ignore_mp, pred_reduce, OP_vs)
+DEF_RVV_FUNCTION(vredor, vredor, (3, VATTR(1, VLMUL1, unsigned), VITER(VI, unsigned), VATTR(1, VLMUL1, unsigned)), pat_void_dest_ignore_mp, pred_reduce, OP_vs)
+DEF_RVV_FUNCTION(vredxor, vredxor, (3, VATTR(1, VLMUL1, signed), VITER(VI, signed), VATTR(1, VLMUL1, signed)), pat_void_dest_ignore_mp, pred_reduce, OP_vs)
+DEF_RVV_FUNCTION(vredxor, vredxor, (3, VATTR(1, VLMUL1, unsigned), VITER(VI, unsigned), VATTR(1, VLMUL1, unsigned)), pat_void_dest_ignore_mp, pred_reduce, OP_vs)
+DEF_RVV_FUNCTION(vwredsum, vwredsum, (3, VATTR(1, VWLMUL1, signed), VITER(VWREDI, signed), VATTR(1, VWLMUL1, signed)), pat_void_dest_ignore_mp, pred_reduce, OP_vs)
+DEF_RVV_FUNCTION(vwredsumu, vwredsumu, (3, VATTR(1, VWLMUL1, unsigned), VITER(VWREDI, unsigned), VATTR(1, VWLMUL1, unsigned)), pat_void_dest_ignore_mp, pred_reduce, OP_vs)
+DEF_RVV_FUNCTION(vfredosum, vfredosum, (3, VATTR(1, VLMUL1, signed), VITER(VF, signed), VATTR(1, VLMUL1, signed)), pat_void_dest_ignore_mp, pred_reduce, OP_vs)
+DEF_RVV_FUNCTION(vfredusum, vfredusum, (3, VATTR(1, VLMUL1, signed), VITER(VF, signed), VATTR(1, VLMUL1, signed)), pat_void_dest_ignore_mp, pred_reduce, OP_vs)
+DEF_RVV_FUNCTION(vfredmax, vfredmax, (3, VATTR(1, VLMUL1, signed), VITER(VF, signed), VATTR(1, VLMUL1, signed)), pat_void_dest_ignore_mp, pred_reduce, OP_vs)
+DEF_RVV_FUNCTION(vfredmin, vfredmin, (3, VATTR(1, VLMUL1, signed), VITER(VF, signed), VATTR(1, VLMUL1, signed)), pat_void_dest_ignore_mp, pred_reduce, OP_vs)
+DEF_RVV_FUNCTION(vfwredosum, vfwredosum, (3, VATTR(1, VWLMUL1, signed), VITER(VWREDF, signed), VATTR(1, VWLMUL1, signed)), pat_void_dest_ignore_mp, pred_reduce, OP_vs)
+DEF_RVV_FUNCTION(vfwredusum, vfwredusum, (3, VATTR(1, VWLMUL1, signed), VITER(VWREDF, signed), VATTR(1, VWLMUL1, signed)), pat_void_dest_ignore_mp, pred_reduce, OP_vs)
+/* 15. Vector Mask Instructions. */
+DEF_RVV_FUNCTION(vmand, vmand, (3, VITER(VB, signed), VATTR(0, VB, signed), VATTR(0, VB, signed)), PAT_none, PRED_void, OP_mm)
+DEF_RVV_FUNCTION(vmor, vmor, (3, VITER(VB, signed), VATTR(0, VB, signed), VATTR(0, VB, signed)), PAT_none, PRED_void, OP_mm)
+DEF_RVV_FUNCTION(vmxor, vmxor, (3, VITER(VB, signed), VATTR(0, VB, signed), VATTR(0, VB, signed)), PAT_none, PRED_void, OP_mm)
+DEF_RVV_FUNCTION(vmnand, vmnand, (3, VITER(VB, signed), VATTR(0, VB, signed), VATTR(0, VB, signed)), PAT_none, PRED_void, OP_mm)
+DEF_RVV_FUNCTION(vmnor, vmnor, (3, VITER(VB, signed), VATTR(0, VB, signed), VATTR(0, VB, signed)), PAT_none, PRED_void, OP_mm)
+DEF_RVV_FUNCTION(vmxnor, vmxnor, (3, VITER(VB, signed), VATTR(0, VB, signed), VATTR(0, VB, signed)), PAT_none, PRED_void, OP_mm)
+DEF_RVV_FUNCTION(vmandn, vmandn, (3, VITER(VB, signed), VATTR(0, VB, signed), VATTR(0, VB, signed)), PAT_none, PRED_void, OP_mm)
+DEF_RVV_FUNCTION(vmorn, vmorn, (3, VITER(VB, signed), VATTR(0, VB, signed), VATTR(0, VB, signed)), PAT_none, PRED_void, OP_mm)
+DEF_RVV_FUNCTION(vmmv, vmmv, (2, VITER(VB, signed), VATTR(0, VB, signed)), PAT_none, PRED_void, OP_m)
+DEF_RVV_FUNCTION(vmnot, vmnot, (2, VITER(VB, signed), VATTR(0, VB, signed)), PAT_none, PRED_void, OP_m)
+DEF_RVV_FUNCTION(vmclr, vmclr, (1, VITER(VB, signed)), PAT_none, PRED_void, OP_m)
+DEF_RVV_FUNCTION(vmset, vmset, (1, VITER(VB, signed)), PAT_none, PRED_void, OP_m)
+DEF_RVV_FUNCTION(vcpop, vcpop, (2, VITER(VB, signed), VATTR(0, VB, signed)), pat_mask_ignore_policy, pred_mask2, OP_m)
+DEF_RVV_FUNCTION(vfirst, vfirst, (2, VITER(VB, signed), VATTR(0, VB, signed)), pat_mask_ignore_policy, pred_mask2, OP_m)
+DEF_RVV_FUNCTION(vmsbf, vmsbf, (2, VITER(VB, signed), VATTR(0, VB, signed)), pat_mask_ignore_tp, pred_mask, OP_m)
+DEF_RVV_FUNCTION(vmsif, vmsif, (2, VITER(VB, signed), VATTR(0, VB, signed)), pat_mask_ignore_tp, pred_mask, OP_m)
+DEF_RVV_FUNCTION(vmsof, vmsof, (2, VITER(VB, signed), VATTR(0, VB, signed)), pat_mask_ignore_tp, pred_mask, OP_m)
+DEF_RVV_FUNCTION(viota, viota, (2, VITER(VI, unsigned), VATTR(0, VM, signed)), pat_mask_tail, pred_all, OP_m)
+DEF_RVV_FUNCTION(vid, vid, (1, VITER(VI, signed)), pat_mask_tail, pred_all, OP_v)
+DEF_RVV_FUNCTION(vid, vid, (1, VITER(VI, unsigned)), pat_mask_tail, pred_all, OP_v)
+/* 16. Vector Permutation Instructions. */
+DEF_RVV_FUNCTION(vmv_x_s, vmv_x_s, (2, VATTR(1, VSUB, signed), VITER(VI, signed)), PAT_none, PRED_none, OP_none)
+DEF_RVV_FUNCTION(vmv_x_s, vmv_x_s, (2, VATTR(1, VSUB, unsigned), VITER(VI, unsigned)), PAT_none, PRED_none, OP_none)
+DEF_RVV_FUNCTION(vmv_s_x, vmv_s_x, (2, VITER(VI, signed), VATTR(0, VSUB, signed)), pat_tail_void_dest, pred_tail, OP_none)
+DEF_RVV_FUNCTION(vmv_s_x, vmv_s_x, (2, VITER(VI, unsigned), VATTR(0, VSUB, unsigned)), pat_tail_void_dest, pred_tail, OP_none)
+DEF_RVV_FUNCTION(vfmv_f_s, vfmv_f_s, (2, VATTR(1, VSUB, signed), VITER(VF, signed)), PAT_none, PRED_none, OP_none)
+DEF_RVV_FUNCTION(vfmv_s_f, vfmv_s_f, (2, VITER(VF, signed), VATTR(0, VSUB, signed)), pat_tail_void_dest, pred_tail, OP_none)
+DEF_RVV_FUNCTION(vslideup, vslideup, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VSUB, signed)), pat_mask_tail_dest, pred_all, OP_vx)
+DEF_RVV_FUNCTION(vslideup, vslideup, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VSUB, unsigned)), pat_mask_tail_dest, pred_all, OP_vx)
+DEF_RVV_FUNCTION(vslideup, vslideup, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VSUB, signed)), pat_mask_tail_dest, pred_all, OP_vx)
+DEF_RVV_FUNCTION(vslidedown, vslidedown, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VSUB, signed)), pat_mask_tail_dest, pred_all, OP_vx)
+DEF_RVV_FUNCTION(vslidedown, vslidedown, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VSUB, unsigned)), pat_mask_tail_dest, pred_all, OP_vx)
+DEF_RVV_FUNCTION(vslidedown, vslidedown, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VSUB, signed)), pat_mask_tail_dest, pred_all, OP_vx)
+DEF_RVV_FUNCTION(vslide1up, vslide1up, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VSUB, signed)), pat_mask_tail, pred_all, OP_vx)
+DEF_RVV_FUNCTION(vslide1up, vslide1up, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VSUB, unsigned)), pat_mask_tail, pred_all, OP_vx)
+DEF_RVV_FUNCTION(vslide1down, vslide1down, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VSUB, signed)), pat_mask_tail, pred_all, OP_vx)
+DEF_RVV_FUNCTION(vslide1down, vslide1down, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VSUB, unsigned)), pat_mask_tail, pred_all, OP_vx)
+DEF_RVV_FUNCTION(vfslide1up, vfslide1up, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VSUB, signed)), pat_mask_tail, pred_all, OP_vf)
+DEF_RVV_FUNCTION(vfslide1down, vfslide1down, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VSUB, signed)), pat_mask_tail, pred_all, OP_vf)
+DEF_RVV_FUNCTION(vrgather, vrgather, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VMAP, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vrgather, vrgather, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VMAP, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vrgather, vrgather, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VMAP, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vrgatherei16, vrgatherei16, (3, VITER(VI16, signed), VATTR(0, VI16, signed), VATTR(0, VMAPI16, unsigned)), pat_mask_tail, pred_all, OP_vv)
+DEF_RVV_FUNCTION(vrgatherei16, vrgatherei16, (3, VITER(VI16, unsigned), VATTR(0, VI16, unsigned), VATTR(0, VMAPI16, unsigned)), pat_mask_tail, pred_all, OP_vv)
+DEF_RVV_FUNCTION(vrgatherei16, vrgatherei16, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VMAPI16, unsigned)), pat_mask_tail, pred_all, OP_vv)
+DEF_RVV_FUNCTION(vcompress, vcompress, (2, VITER(VI, signed), VATTR(0, VI, signed)), PAT_tail | PAT_void_dest | PAT_merge, pred_tail, OP_vm)
+DEF_RVV_FUNCTION(vcompress, vcompress, (2, VITER(VI, unsigned), VATTR(0, VI, unsigned)), PAT_tail | PAT_void_dest | PAT_merge, pred_tail, OP_vm)
+DEF_RVV_FUNCTION(vcompress, vcompress, (2, VITER(VF, signed), VATTR(0, VF, signed)), PAT_tail | PAT_void_dest | PAT_merge, pred_tail, OP_vm)
#undef REQUIRED_EXTENSIONS
#undef DEF_RVV_FUNCTION
#undef VITER
@@ -1533,6 +1533,1373 @@ public:
virtual rtx expand (const function_instance &, tree, rtx) const override;
};
+/* A function_base for reduction functions. */
+class reduceop : public basic_alu
+{
+public:
+ // use the same construction function as the basic_alu
+ using basic_alu::basic_alu;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual tree get_mask_type (tree, const function_instance &, const vec<tree> &) const override;
+
+ virtual void get_argument_types (const function_instance &, vec<tree> &) const override;
+};
+
+/* A function_base for vsadd functions. */
+class vsadd : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vsaddu functions. */
+class vsaddu : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vaadd functions. */
+class vaadd : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vaaddu functions. */
+class vaaddu : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vssub functions. */
+class vssub : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vssubu functions. */
+class vssubu : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vasub functions. */
+class vasub : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vasubu functions. */
+class vasubu : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vssrl functions. */
+class vssrl : public vshift
+{
+public:
+ // use the same construction function as the vshift
+ using vshift::vshift;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vssra functions. */
+class vssra : public vshift
+{
+public:
+ // use the same construction function as the vshift
+ using vshift::vshift;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vsmul functions. */
+class vsmul : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vnclip functions. */
+class vnclip : public vshift
+{
+public:
+ // use the same construction function as the vshift
+ using vshift::vshift;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vnclipu functions. */
+class vnclipu : public vshift
+{
+public:
+ // use the same construction function as the vshift
+ using vshift::vshift;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for funop functions. */
+class funop : public unop
+{
+public:
+ // use the same construction function as the unop
+ using unop::unop;
+
+ virtual unsigned int call_properties () const override;
+};
+
+/* A function_base for fbinop functions. */
+class fbinop : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual unsigned int call_properties () const override;
+};
+
+/* A function_base for fwbinop functions. */
+class fwbinop : public wbinop
+{
+public:
+ // use the same construction function as the wbinop
+ using wbinop::wbinop;
+
+ virtual unsigned int call_properties () const override;
+};
+
+/* A function_base for fternop functions. */
+class fternop : public ternop
+{
+public:
+  // use the same construction function as the ternop
+ using ternop::ternop;
+
+ virtual unsigned int call_properties () const override;
+};
+
+/* A function_base for vfadd functions. */
+class vfadd : public fbinop
+{
+public:
+ // use the same construction function as the fbinop
+ using fbinop::fbinop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfsub functions. */
+class vfsub : public fbinop
+{
+public:
+ // use the same construction function as the fbinop
+ using fbinop::fbinop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfmul functions. */
+class vfmul : public fbinop
+{
+public:
+ // use the same construction function as the fbinop
+ using fbinop::fbinop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfdiv functions. */
+class vfdiv : public fbinop
+{
+public:
+ // use the same construction function as the fbinop
+ using fbinop::fbinop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfrsub functions. */
+class vfrsub : public fbinop
+{
+public:
+  // use the same construction function as the fbinop
+ using fbinop::fbinop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfrdiv functions. */
+class vfrdiv : public fbinop
+{
+public:
+  // use the same construction function as the fbinop
+ using fbinop::fbinop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfneg functions. */
+class vfneg : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfwadd functions. */
+class vfwadd : public fwbinop
+{
+public:
+ // use the same construction function as the fwbinop
+ using fwbinop::fwbinop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfwsub functions. */
+class vfwsub : public fwbinop
+{
+public:
+ // use the same construction function as the fwbinop
+ using fwbinop::fwbinop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfwmul functions. */
+class vfwmul : public fbinop
+{
+public:
+ // use the same construction function as the fbinop
+ using fbinop::fbinop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfmacc functions. */
+class vfmacc : public fternop
+{
+public:
+ // use the same construction function as the fternop
+ using fternop::fternop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfmsac functions. */
+class vfmsac : public fternop
+{
+public:
+ // use the same construction function as the fternop
+ using fternop::fternop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfnmacc functions. */
+class vfnmacc : public fternop
+{
+public:
+ // use the same construction function as the fternop
+ using fternop::fternop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfnmsac functions. */
+class vfnmsac : public fternop
+{
+public:
+ // use the same construction function as the fternop
+ using fternop::fternop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfmadd functions. */
+class vfmadd : public fternop
+{
+public:
+ // use the same construction function as the fternop
+ using fternop::fternop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfnmadd functions. */
+class vfnmadd : public fternop
+{
+public:
+ // use the same construction function as the fternop
+ using fternop::fternop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfmsub functions. */
+class vfmsub : public fternop
+{
+public:
+ // use the same construction function as the fternop
+ using fternop::fternop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfnmsub functions. */
+class vfnmsub : public fternop
+{
+public:
+ // use the same construction function as the fternop
+ using fternop::fternop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfwmacc functions. */
+class vfwmacc : public fternop
+{
+public:
+  // use the same construction function as the fternop
+ using fternop::fternop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfwmsac functions. */
+class vfwmsac : public fternop
+{
+public:
+  // use the same construction function as the fternop
+ using fternop::fternop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfwnmacc functions. */
+class vfwnmacc : public fternop
+{
+public:
+  // use the same construction function as the fternop
+ using fternop::fternop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfwnmsac functions. */
+class vfwnmsac : public fternop
+{
+public:
+  // use the same construction function as the fternop
+ using fternop::fternop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfsqrt functions. */
+class vfsqrt : public funop
+{
+public:
+  // use the same construction function as the funop
+ using funop::funop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfrsqrt7 functions. */
+class vfrsqrt7 : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfrec7 functions. */
+class vfrec7 : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfmax functions. */
+class vfmax : public fbinop
+{
+public:
+ // use the same construction function as the fbinop
+ using fbinop::fbinop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfmin functions. */
+class vfmin : public fbinop
+{
+public:
+ // use the same construction function as the fbinop
+ using fbinop::fbinop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfsgnj, vfsgnjn and vfsgnjx functions. */
+class vfsgnj : public fbinop
+{
+public:
+ // use the same construction function as the fbinop
+ using fbinop::fbinop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfsgnjn functions. */
+class vfsgnjn : public fbinop
+{
+public:
+ // use the same construction function as the fbinop
+ using fbinop::fbinop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfsgnjx functions. */
+class vfsgnjx : public fbinop
+{
+public:
+ // use the same construction function as the fbinop
+ using fbinop::fbinop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfabs functions. */
+class vfabs : public funop
+{
+public:
+ // use the same construction function as the unop
+ using funop::funop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base used as the common base of the floating-point compare (vmf*) functions; it supplies their shared name assembly. */
+class vfcmp : public fbinop
+{
+public:
+ // use the same construction function as the fbinop
+ using fbinop::fbinop;
+
+ virtual char * assemble_name (function_instance &) override;
+};
+
+/* A function_base for vmfeq functions. */
+class vmfeq : public vfcmp
+{
+public:
+ // use the same construction function as the vfcmp
+ using vfcmp::vfcmp;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmfne functions. */
+class vmfne : public vfcmp
+{
+public:
+ // use the same construction function as the vfcmp
+ using vfcmp::vfcmp;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmflt functions. */
+class vmflt : public vfcmp
+{
+public:
+ // use the same construction function as the vfcmp
+ using vfcmp::vfcmp;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmfle functions. */
+class vmfle : public vfcmp
+{
+public:
+ // use the same construction function as the vfcmp
+ using vfcmp::vfcmp;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmfgt functions. */
+class vmfgt : public vfcmp
+{
+public:
+ // use the same construction function as the vfcmp
+ using vfcmp::vfcmp;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmfge functions. */
+class vmfge : public vfcmp
+{
+public:
+ // use the same construction function as the vfcmp
+ using vfcmp::vfcmp;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfclass functions. */
+class vfclass : public unop
+{
+public:
+ // use the same construction function as the unop
+ using unop::unop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfmerge functions. */
+class vfmerge : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual size_t get_position_of_dest_arg (enum predication_index) const override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfmv functions. */
+class vfmv : public unop
+{
+public:
+ // use the same construction function as the unop
+ using unop::unop;
+
+ virtual bool can_be_overloaded_p (const function_instance &) const override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfcvt_x_f_v (float -> signed integer) functions. */
+class vfcvt_f2i : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfcvt_xu_f_v (float -> unsigned integer) functions. */
+class vfcvt_f2u : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfcvt_rtz_x_f_v (float -> signed integer, round toward zero) functions. */
+class vfcvt_rtz_f2i : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfcvt_rtz_xu_f_v (float -> unsigned integer, round toward zero) functions. */
+class vfcvt_rtz_f2u : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfcvt_f_x_v (signed integer -> float) functions. */
+class vfcvt_i2f : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfcvt_f_xu_v (unsigned integer -> float) functions. */
+class vfcvt_u2f : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfwcvt_x_f_v (widening float -> signed integer) functions. */
+class vfwcvt_f2i : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfwcvt_xu_f_v (widening float -> unsigned integer) functions. */
+class vfwcvt_f2u : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfwcvt_rtz_x_f_v (widening float -> signed integer, round toward zero) functions. */
+class vfwcvt_rtz_f2i : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfwcvt_rtz_xu_f_v (widening float -> unsigned integer, round toward zero) functions. */
+class vfwcvt_rtz_f2u : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfwcvt_f_x_v (widening signed integer -> float) functions. */
+class vfwcvt_i2f : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfwcvt_f_xu_v (widening unsigned integer -> float) functions. */
+class vfwcvt_u2f : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfwcvt_f_f_v (float -> wider float) functions. */
+class vfwcvt_f2f : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfncvt_x_f_w (narrowing float -> signed integer) functions. */
+class vfncvt_f2i : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfncvt_xu_f_w (narrowing float -> unsigned integer) functions. */
+class vfncvt_f2u : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfncvt_rtz_x_f_w (narrowing float -> signed integer, round toward zero) functions. */
+class vfncvt_rtz_f2i : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfncvt_rtz_xu_f_w (narrowing float -> unsigned integer, round toward zero) functions. */
+class vfncvt_rtz_f2u : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfncvt_f_x_w (narrowing signed integer -> float) functions. */
+class vfncvt_i2f : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfncvt_f_xu_w (narrowing unsigned integer -> float) functions. */
+class vfncvt_u2f : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfncvt_f_f_w (float -> narrower float) functions. */
+class vfncvt_f2f : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfncvt_rod_f_f_w (narrowing float -> float, round toward odd) functions. */
+class vfncvt_f2rodf : public funop
+{
+public:
+ // use the same construction function as the funop
+ using funop::funop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vredsum functions. */
+class vredsum : public reduceop
+{
+public:
+ // use the same construction function as the reduceop
+ using reduceop::reduceop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vredmax functions. */
+class vredmax : public reduceop
+{
+public:
+ // use the same construction function as the reduceop
+ using reduceop::reduceop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vredmaxu functions. */
+class vredmaxu : public reduceop
+{
+public:
+ // use the same construction function as the reduceop
+ using reduceop::reduceop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vredmin functions. */
+class vredmin : public reduceop
+{
+public:
+ // use the same construction function as the reduceop
+ using reduceop::reduceop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vredminu functions. */
+class vredminu : public reduceop
+{
+public:
+ // use the same construction function as the reduceop
+ using reduceop::reduceop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vredand functions. */
+class vredand : public reduceop
+{
+public:
+ // use the same construction function as the reduceop
+ using reduceop::reduceop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vredor functions. */
+class vredor : public reduceop
+{
+public:
+ // use the same construction function as the reduceop
+ using reduceop::reduceop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vredxor functions. */
+class vredxor : public reduceop
+{
+public:
+ // use the same construction function as the reduceop
+ using reduceop::reduceop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vwredsum (widening reduction) functions. */
+class vwredsum : public reduceop
+{
+public:
+ // use the same construction function as the reduceop
+ using reduceop::reduceop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vwredsumu (widening reduction) functions. */
+class vwredsumu : public reduceop
+{
+public:
+ // use the same construction function as the reduceop
+ using reduceop::reduceop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base serving as the common base of the floating-point reduction functions; it only adjusts call_properties. */
+class freduceop : public reduceop
+{
+public:
+ // use the same construction function as the reduceop
+ using reduceop::reduceop;
+
+ virtual unsigned int call_properties () const override;
+};
+
+/* A function_base for vfredosum functions. */
+class vfredosum : public freduceop
+{
+public:
+ // use the same construction function as the freduceop
+ using freduceop::freduceop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfredusum functions. */
+class vfredusum : public freduceop
+{
+public:
+ // use the same construction function as the freduceop
+ using freduceop::freduceop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfredmax functions. */
+class vfredmax : public freduceop
+{
+public:
+ // use the same construction function as the freduceop
+ using freduceop::freduceop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfredmin functions. */
+class vfredmin : public freduceop
+{
+public:
+ // use the same construction function as the freduceop
+ using freduceop::freduceop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfwredosum (widening floating-point reduction) functions. */
+class vfwredosum : public freduceop
+{
+public:
+ // use the same construction function as the freduceop
+ using freduceop::freduceop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfwredusum (widening floating-point reduction) functions. */
+class vfwredusum : public freduceop
+{
+public:
+ // use the same construction function as the freduceop
+ using freduceop::freduceop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmand functions. */
+class vmand : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmor functions. */
+class vmor : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmxor functions. */
+class vmxor : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmnand functions. */
+class vmnand : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmnor functions. */
+class vmnor : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmxnor functions. */
+class vmxnor : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmandn functions. */
+class vmandn : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmorn functions. */
+class vmorn : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmmv functions. */
+class vmmv : public unop
+{
+public:
+ // use the same construction function as the unop
+ using unop::unop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmnot functions. */
+class vmnot : public unop
+{
+public:
+ // use the same construction function as the unop
+ using unop::unop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmclr functions. */
+class vmclr : public unop
+{
+public:
+ // use the same construction function as the unop
+ using unop::unop;
+
+ virtual void get_argument_types (const function_instance &, vec<tree> &) const override;
+
+ virtual bool can_be_overloaded_p (const function_instance &) const override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmset functions. */
+class vmset : public unop
+{
+public:
+ // use the same construction function as the unop
+ using unop::unop;
+
+ virtual void get_argument_types (const function_instance &, vec<tree> &) const override;
+
+ virtual bool can_be_overloaded_p (const function_instance &) const override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vcpop functions; overrides get_return_type, so the result is a scalar rather than a vector. */
+class vcpop : public unop
+{
+public:
+ // use the same construction function as the unop
+ using unop::unop;
+
+ virtual tree get_return_type (const function_instance &) const override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfirst functions; overrides get_return_type, so the result is a scalar rather than a vector. */
+class vfirst : public unop
+{
+public:
+ // use the same construction function as the unop
+ using unop::unop;
+
+ virtual tree get_return_type (const function_instance &) const override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmsbf functions. */
+class vmsbf : public unop
+{
+public:
+ // use the same construction function as the unop
+ using unop::unop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmsif functions. */
+class vmsif : public unop
+{
+public:
+ // use the same construction function as the unop
+ using unop::unop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmsof functions. */
+class vmsof : public unop
+{
+public:
+ // use the same construction function as the unop
+ using unop::unop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for viota functions. */
+class viota : public unop
+{
+public:
+ // use the same construction function as the unop
+ using unop::unop;
+
+ virtual bool can_be_overloaded_p (const function_instance &) const override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vid functions; overrides get_argument_types (its argument list differs from the default unop). */
+class vid : public unop
+{
+public:
+ // use the same construction function as the unop
+ using unop::unop;
+
+ virtual void get_argument_types (const function_instance &, vec<tree> &) const override;
+
+ virtual bool can_be_overloaded_p (const function_instance &) const override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmv_x_s functions. */
+class vmv_x_s : public unop
+{
+public:
+ // use the same construction function as the unop
+ using unop::unop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmv_s_x functions. */
+class vmv_s_x : public unop
+{
+public:
+ // use the same construction function as the unop
+ using unop::unop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfmv_f_s functions. */
+class vfmv_f_s : public unop
+{
+public:
+ // use the same construction function as the unop
+ using unop::unop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfmv_s_f functions. */
+class vfmv_s_f : public unop
+{
+public:
+ // use the same construction function as the unop
+ using unop::unop;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vslideup functions. */
+class vslideup : public vshift
+{
+public:
+ // use the same construction function as the vshift
+ using vshift::vshift;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vslidedown functions. */
+class vslidedown : public vshift
+{
+public:
+ // use the same construction function as the vshift
+ using vshift::vshift;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vslide1up functions. */
+class vslide1up : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vslide1down functions. */
+class vslide1down : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfslide1up functions. */
+class vfslide1up : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vfslide1down functions. */
+class vfslide1down : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vrgather functions. */
+class vrgather : public vshift
+{
+public:
+ // use the same construction function as the vshift
+ using vshift::vshift;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vrgatherei16 functions. */
+class vrgatherei16 : public binop
+{
+public:
+ // use the same construction function as the binop
+ using binop::binop;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vcompress functions. */
+class vcompress : public unop
+{
+public:
+ // use the same construction function as the unop
+ using unop::unop;
+
+ virtual size_t get_position_of_dest_arg (enum predication_index) const override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
} // namespace riscv_vector
#endif // end GCC_RISCV_VECTOR_BUILTINS_FUNCTIONS_H
\ No newline at end of file
@@ -905,7 +905,7 @@ rvv_adjust_frame (rtx target, poly_int64 offset, bool epilogue)
}
/* Helper functions for handling sew=64 on RV32 system. */
-bool
+static bool
imm32_p (rtx a)
{
if (!CONST_SCALAR_INT_P (a))
@@ -928,7 +928,7 @@ enum GEN_CLASS
};
/* Helper functions for handling sew=64 on RV32 system. */
-enum GEN_CLASS
+static enum GEN_CLASS
modify_operands (machine_mode Vmode, machine_mode VSImode,
machine_mode VMSImode, machine_mode VSUBmode, rtx *operands,
bool (*imm5_p) (rtx), int i, bool reverse, unsigned int unspec)
@@ -970,7 +970,7 @@ modify_operands (machine_mode Vmode, machine_mode VSImode,
}
/* Helper functions for handling sew=64 on RV32 system. */
-bool
+static bool
emit_op5_vmv_v_x (machine_mode Vmode, machine_mode VSImode,
machine_mode VMSImode, machine_mode VSUBmode, rtx *operands,
int i)
@@ -994,6 +994,51 @@ emit_op5_vmv_v_x (machine_mode Vmode, machine_mode VSImode,
return false;
}
+/* Expand vmv.s.x of a DImode scalar on RV32: since the 64-bit value cannot live in one GPR, insert its two 32-bit halves through SEW=32 slides.  Returns true iff the insn was fully emitted here.  */
+static bool
+emit_op5_vmv_s_x (machine_mode Vmode, machine_mode VSImode,
+ machine_mode VSUBmode, rtx *operands, int i)
+{
+ if (!TARGET_64BIT && VSUBmode == DImode)
+ {
+ if (!imm32_p (operands[i])) /* Scalar cannot be handled as a 32-bit value.  */
+ {
+ rtx s = operands[i];
+ if (CONST_SCALAR_INT_P (s))
+ {
+ s = force_reg (DImode, s);
+ }
+
+ rtx hi = gen_highpart (SImode, s); /* Upper 32 bits.  */
+ rtx lo = gen_lowpart (SImode, s); /* Lower 32 bits.  */
+ rtx vlx2 = gen_vlx2 (operands[3], Vmode, VSImode); /* Doubled VL for the SEW=32 view.  */
+
+ rtx vret = operands[0];
+ rtx vd = operands[1];
+ if (vd == const0_rtx)
+ {
+ vd = gen_reg_rtx (Vmode); /* No merge operand given; use a fresh register.  */
+ }
+ rtx vd_si = gen_lowpart (VSImode, vd);
+
+ emit_insn (gen_vslide_vx (UNSPEC_SLIDEDOWN, VSImode, vd_si,
+ const0_rtx, vd_si, vd_si, const2_rtx, vlx2,
+ operands[4])); /* Slide down by 2 SEW=32 slots.  */
+ emit_insn (gen_vslide1_vx_internal (UNSPEC_SLIDE1UP, VSImode, vd_si,
+ const0_rtx, vd_si, vd_si, hi,
+ vlx2, operands[4])); /* Insert hi ...  */
+ emit_insn (gen_vslide1_vx_internal (UNSPEC_SLIDE1UP, VSImode, vd_si,
+ const0_rtx, vd_si, vd_si, lo, vlx2,
+ operands[4])); /* ... then lo, forming the 64-bit element 0.  */
+
+ emit_insn (gen_rtx_SET (vret, gen_lowpart (Vmode, vd_si)));
+
+ return true;
+ }
+ }
+ return false;
+}
+
/* Helper functions for handling sew=64 on RV32 system. */
void
emit_op5 (unsigned int unspec, machine_mode Vmode, machine_mode VSImode,
@@ -1008,6 +1053,13 @@ emit_op5 (unsigned int unspec, machine_mode Vmode, machine_mode VSImode,
return;
}
}
+ else if (unspec == UNSPEC_VMVS)
+ {
+ if (emit_op5_vmv_s_x (Vmode, VSImode, VSUBmode, operands, i))
+ {
+ return;
+ }
+ }
enum GEN_CLASS gen_class = modify_operands (
Vmode, VSImode, VMSImode, VSUBmode, operands, imm5_p, i, reverse, unspec);
@@ -1038,6 +1090,85 @@ emit_op6 (unsigned int unspec ATTRIBUTE_UNUSED, machine_mode Vmode,
operands[4], operands[5]));
}
+/* Expand vslide1up/vslide1down with a DImode scalar on RV32 as two SEW=32 slide1 steps over the doubled-VL view, with an optional final masked merge.  Returns true iff the insn was fully emitted here.  */
+static bool
+emit_op7_slide1 (unsigned int unspec, machine_mode Vmode, machine_mode VSImode,
+ machine_mode VSUBmode, rtx *operands, int i)
+{
+ if (!TARGET_64BIT && VSUBmode == DImode)
+ {
+ if (!imm32_p (operands[i])) /* Scalar cannot be handled as a 32-bit value.  */
+ {
+ rtx s = operands[i];
+ if (CONST_SCALAR_INT_P (s))
+ {
+ s = force_reg (DImode, s);
+ }
+
+ rtx hi = gen_highpart (SImode, s); /* Upper 32 bits.  */
+ rtx lo = gen_lowpart (SImode, s); /* Lower 32 bits.  */
+
+ rtx vret = operands[0];
+ rtx mask = operands[1];
+ rtx vs = operands[3];
+ rtx avl = operands[5];
+ rtx vlx2 = gen_vlx2 (avl, Vmode, VSImode); /* Doubled VL for the SEW=32 view.  */
+ rtx vs_si = gen_lowpart (VSImode, vs);
+ rtx vtemp;
+ if (rtx_equal_p (operands[2], const0_rtx))
+ {
+ vtemp = gen_reg_rtx (VSImode);
+ }
+ else
+ {
+ vtemp = gen_lowpart (VSImode, operands[2]);
+ }
+
+ if (unspec == UNSPEC_SLIDE1UP) /* Up: hi first, then lo, into element 0.  */
+ {
+ rtx v1 = gen_reg_rtx (VSImode);
+
+ emit_insn (gen_vslide1_vx_internal (UNSPEC_SLIDE1UP, VSImode, v1,
+ const0_rtx, const0_rtx, vs_si,
+ hi, vlx2, operands[6]));
+ emit_insn (gen_vslide1_vx_internal (UNSPEC_SLIDE1UP, VSImode,
+ vtemp, const0_rtx, const0_rtx,
+ v1, lo, vlx2, operands[6]));
+ }
+ else /* Down: lo first, then hi, into the last element.  */
+ {
+ emit_insn (gen_vslide1_vx_internal (
+ UNSPEC_SLIDE1DOWN, VSImode, vtemp, const0_rtx, const0_rtx,
+ vs_si, force_reg (GET_MODE (lo), lo), vlx2, operands[6]));
+ emit_insn (gen_vslide1_vx_internal (
+ UNSPEC_SLIDE1DOWN, VSImode, vtemp, const0_rtx, const0_rtx,
+ vtemp, force_reg (GET_MODE (hi), hi), vlx2, operands[6]));
+ }
+
+ if (rtx_equal_p (mask, const0_rtx)) /* Unmasked: store result directly.  */
+ {
+ emit_insn (gen_rtx_SET (vret, gen_lowpart (Vmode, vtemp)));
+ }
+ else /* Masked: combine old and new values with vmerge under the mask.  */
+ {
+ rtx dest = operands[2];
+ if (rtx_equal_p (dest, const0_rtx))
+ {
+ dest = vret;
+ }
+ emit_insn (gen_vmerge_vvm (Vmode, dest, mask, dest, dest,
+ gen_lowpart (Vmode, vtemp),
+ force_reg_for_over_uimm (avl),
+ operands[6]));
+
+ emit_insn (gen_rtx_SET (vret, dest));
+ }
+
+ return true;
+ }
+ }
+ return false;
+}
/* Helper functions for handling sew=64 on RV32 system. */
void
@@ -1046,6 +1177,14 @@ emit_op7 (unsigned int unspec, machine_mode Vmode, machine_mode VSImode,
gen_7 *gen_vx, gen_7 *gen_vx_32bit, gen_7 *gen_vv, imm_p *imm5_p,
int i, bool reverse)
{
+ if (unspec == UNSPEC_SLIDE1UP || unspec == UNSPEC_SLIDE1DOWN)
+ {
+ if (emit_op7_slide1 (unspec, Vmode, VSImode, VSUBmode, operands, i))
+ {
+ return;
+ }
+ }
+
enum GEN_CLASS gen_class = modify_operands (
Vmode, VSImode, VMSImode, VSUBmode, operands, imm5_p, i, reverse, unspec);
@@ -792,6 +792,9 @@
UNSPEC_VMIN UNSPEC_VMINU UNSPEC_VMAX UNSPEC_VMAXU
UNSPEC_VMUL UNSPEC_VMULH UNSPEC_VMULHU UNSPEC_VMULHSU
UNSPEC_VDIV UNSPEC_VDIVU UNSPEC_VREM UNSPEC_VREMU
+ UNSPEC_VSADD UNSPEC_VSADDU UNSPEC_VSSUB UNSPEC_VSSUBU
+ UNSPEC_VAADD UNSPEC_VAADDU UNSPEC_VASUB UNSPEC_VASUBU
+ UNSPEC_VSMUL
])
(define_int_iterator VXMOP [
@@ -1222,6 +1222,77 @@
}
)
+;; vmv.s.x: copy scalar register x into element 0 of the destination vector.
+(define_expand "@v<vxoptab><mode>_s_x"
+ [(unspec [
+ (match_operand:VI 0 "register_operand")
+ (match_operand:VI 1 "vector_reg_or_const0_operand")
+ (match_operand:<VSUB> 2 "reg_or_const_int_operand")
+ (match_operand 3 "p_reg_or_const_csr_operand")
+ (match_operand 4 "const_int_operand")
+ ] VMVSOP)]
+ "TARGET_VECTOR"
+ {
+ emit_op5 (
+ <VXOPTAB>,
+ <MODE>mode, <VDI_TO_VSI>mode, <VDI_TO_VSI_VM>mode,
+ <VSUB>mode,
+ operands,
+ gen_v<vxoptab><mode>_s_x_internal,
+ gen_v<vxoptab><vi_to_v64biti>_s_x_32bit,
+ NULL,
+ satisfies_constraint_<immptab>,
+ 2, false
+ );
+ DONE;
+ }
+)
+
+;; vslide1up/vslide1down: slide the vector by one element and insert scalar x.
+(define_expand "@vslide1<vxoptab><mode>_vx"
+ [(unspec [
+ (match_operand:VI 0 "register_operand")
+ (match_operand:<VM> 1 "vector_reg_or_const0_operand")
+ (match_operand:VI 2 "vector_reg_or_const0_operand")
+ (match_operand:VI 3 "register_operand")
+ (match_operand:<VSUB> 4 "reg_or_const_int_operand")
+ (match_operand 5 "reg_or_const_int_operand")
+ (match_operand 6 "const_int_operand")
+ ] VSLIDE1)]
+ "TARGET_VECTOR"
+ {
+ emit_op7 (
+ <VXOPTAB>,
+ <MODE>mode, <VDI_TO_VSI>mode, <VDI_TO_VSI_VM>mode,
+ <VSUB>mode,
+ operands,
+ gen_vslide1<vxoptab><mode>_vx_internal,
+ gen_vslide1<vxoptab><vi_to_v64biti>_vx_32bit,
+ NULL,
+ satisfies_constraint_<immptab>,
+ 4, false
+ );
+ DONE;
+ }
+)
+
+;; Helper expand to double the vl operand: op0 = op1 << 1, saturated to all-ones on overflow (ltu detects the carry, x0 - flag yields the saturation mask).
+(define_expand "vmv_vlx2_help"
+ [
+ (set (match_operand:SI 0 "register_operand")
+ (ashift:SI (match_operand:SI 1 "register_operand")
+ (const_int 1)))
+ (set (match_operand:SI 2 "register_operand")
+ (ltu:SI (match_dup 0) (match_dup 1)))
+ (set (match_dup 2)
+ (minus:SI (reg:SI X0_REGNUM) (match_dup 2)))
+ (set (match_dup 0)
+ (ior:SI (match_dup 0) (match_dup 2)))
+ ]
+ "TARGET_VECTOR"
+ ""
+)
+
;; -------------------------------------------------------------------------------
;; ---- 11. Vector Integer Arithmetic Instructions
;; -------------------------------------------------------------------------------
@@ -3521,14 +3592,14 @@
"vmv.v.v\t%0,%2"
[(set_attr "type" "vmove")
(set_attr "mode" "<MODE>")])
-
+
;; Vector-Scalar Integer Move.
(define_insn "@vmv<mode>_v_x_internal"
[(set (match_operand:VI 0 "register_operand" "=vr,vr,vr,vr")
(unspec:VI
[(match_operand:VI 1 "vector_reg_or_const0_operand" "0,0,J,J")
(vec_duplicate:VI
- (match_operand:<VSUB> 2 "reg_or_simm5_operand" "r,Ws5,r,Ws5"))
+ (match_operand:<VSUB> 2 "reg_or_simm5_operand" "r,Ws5,r,Ws5"))
(match_operand 3 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
(match_operand 4 "const_int_operand")
(reg:SI VL_REGNUM)
@@ -3561,46 +3632,2401 @@
[(set_attr "type" "vmove")
(set_attr "mode" "<MODE>")])
-;; Vector-Scalar Floating-Point Move.
-(define_insn "@vfmv<mode>_v_f"
- [(set (match_operand:VF 0 "register_operand" "=vr,vr")
- (unspec:VF
- [(match_operand:VF 1 "vector_reg_or_const0_operand" "0,J")
- (vec_duplicate:VF
- (match_operand:<VSUB> 2 "register_operand" "f,f"))
- (match_operand 3 "p_reg_or_const_csr_operand" "rK,rK")
- (match_operand 4 "const_int_operand")
+;; -------------------------------------------------------------------------------
+;; ---- 12. Vector Fixed-Point Arithmetic Instructions
+;; -------------------------------------------------------------------------------
+;; Includes:
+;; - 12.1 Vector Single-Width Saturating Add and Subtract
+;; - 12.2 Vector Single-Width Averaging Add and Subtract
+;; - 12.3 Vector Single-Width Fractional Multiply with Rounding and Saturation
+;; - 12.5 Vector Single-Width Scaling Shift Instructions
+;; - 12.6 Vector Narrowing Fixed-Point Clip Instructions
+;; -------------------------------------------------------------------------------
+
+;; Vector-Vector Single-Width Saturating Add.
+(define_insn "@v<optab><mode>_vv"
+ [(set (match_operand:VI 0 "register_operand" "=vd,vd,vd,vd,vr,vr,vr,vr")
+ (unspec:VI
+ [(unspec:VI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+ (any_satplus:VI
+ (match_operand:VI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr")
+ (match_operand:VI 4 "vector_arith_operand" "vr,vi,vr,vi,vr,vi,vr,vi"))
+ (match_operand:VI 2 "vector_reg_or_const0_operand" "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK,rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
(reg:SI VL_REGNUM)
(reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
- "TARGET_VECTOR"
- "vfmv.v.f\t%0,%2"
- [(set_attr "type" "vmove")
- (set_attr "mode" "<MODE>")])
+ "TARGET_VECTOR"
+ "@
+ v<insn>.vv\t%0,%3,%4,%1.t
+ v<insn>.vi\t%0,%3,%v4,%1.t
+ v<insn>.vv\t%0,%3,%4,%1.t
+ v<insn>.vi\t%0,%3,%v4,%1.t
+ v<insn>.vv\t%0,%3,%4
+ v<insn>.vi\t%0,%3,%v4
+ v<insn>.vv\t%0,%3,%4
+ v<insn>.vi\t%0,%3,%v4"
+ [(set_attr "type" "vsarith")
+ (set_attr "mode" "<MODE>")])
-;; vmclr.m vd -> vmxor.mm vd,vd,vd # Clear mask register
-(define_insn "@vmclr<mode>_m"
- [(set (match_operand:VB 0 "register_operand" "=vr")
- (unspec:VB
- [(vec_duplicate:VB (const_int 0))
- (match_operand 1 "p_reg_or_const_csr_operand" "rK")
- (match_operand 2 "const_int_operand")
- (reg:SI VL_REGNUM)
- (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
- "TARGET_VECTOR"
- "vmclr.m\t%0"
- [(set_attr "type" "vmask")
- (set_attr "mode" "<MODE>")])
+;; Vector-Vector Single-Width Saturating Sub.
+;; Operands: 0 = dest, 1 = mask (J = unmasked), 2 = merge (J = none),
+;; 3 = minuend vs2, 4 = subtrahend, 5 = vl, 6 = policy.
+;; vssub has no .vi form, so the vj (negative-immediate) alternatives are
+;; emitted as vsadd.vi — %V4 is presumably the print modifier for the negated
+;; vector immediate; confirm against riscv_print_operand.
+(define_insn "@vsssub<mode>_vv"
+ [(set (match_operand:VI 0 "register_operand" "=vd,vd,vd,vd,vr,vr,vr,vr")
+ (unspec:VI
+ [(unspec:VI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+ (ss_minus:VI
+ (match_operand:VI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr")
+ (match_operand:VI 4 "vector_neg_arith_operand" "vr,vj,vr,vj,vr,vj,vr,vj"))
+ (match_operand:VI 2 "vector_reg_or_const0_operand" "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK,rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vssub.vv\t%0,%3,%4,%1.t
+ vsadd.vi\t%0,%3,%V4,%1.t
+ vssub.vv\t%0,%3,%4,%1.t
+ vsadd.vi\t%0,%3,%V4,%1.t
+ vssub.vv\t%0,%3,%4
+ vsadd.vi\t%0,%3,%V4
+ vssub.vv\t%0,%3,%4
+ vsadd.vi\t%0,%3,%V4"
+ [(set_attr "type" "vsarith")
+ (set_attr "mode" "<MODE>")])
+
+;; Unsigned saturating subtract (vector-vector).  No immediate alternatives:
+;; a negated unsigned immediate cannot be folded into vsaddu.
+(define_insn "@vussub<mode>_vv"
+ [(set (match_operand:VI 0 "register_operand" "=vd,vd,vr,vr")
+ (unspec:VI
+ [(unspec:VI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (us_minus:VI
+ (match_operand:VI 3 "register_operand" "vr,vr,vr,vr")
+ (match_operand:VI 4 "register_operand" "vr,vr,vr,vr"))
+ (match_operand:VI 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vssubu.vv\t%0,%3,%4,%1.t
+ vssubu.vv\t%0,%3,%4,%1.t
+ vssubu.vv\t%0,%3,%4
+ vssubu.vv\t%0,%3,%4"
+ [(set_attr "type" "vsarith")
+ (set_attr "mode" "<MODE>")])
-;; vmset.m vd -> vmxnor.mm vd,vd,vd # Set mask register
-(define_insn "@vmset<mode>_m"
- [(set (match_operand:VB 0 "register_operand" "=vr")
- (unspec:VB
- [(vec_duplicate:VB (const_int 1))
- (match_operand 1 "p_reg_or_const_csr_operand" "rK")
- (match_operand 2 "const_int_operand")
- (reg:SI VL_REGNUM)
- (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
- "TARGET_VECTOR"
- "vmset.m\t%0"
- [(set_attr "type" "vmask")
- (set_attr "mode" "<MODE>")])
\ No newline at end of file
+;; Vector-Scalar Single-Width Saturating Add.
+;; any_satplus iterates over ss_plus/us_plus (vsadd/vsaddu).  Scalar operand 4
+;; is either a GPR (.vx) or a small immediate Ws5 (.vi).
+(define_insn "@v<optab><mode>_vx_internal"
+ [(set (match_operand:VI 0 "register_operand" "=vd,vd,vd,vd,vr,vr,vr,vr")
+ (unspec:VI
+ [(unspec:VI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+ (any_satplus:VI
+ (match_operand:VI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr")
+ (vec_duplicate:VI
+ (match_operand:<VSUB> 4 "reg_or_simm5_operand" "r,Ws5,r,Ws5,r,Ws5,r,Ws5")))
+ (match_operand:VI 2 "vector_reg_or_const0_operand" "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK,rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ v<insn>.vx\t%0,%3,%4,%1.t
+ v<insn>.vi\t%0,%3,%4,%1.t
+ v<insn>.vx\t%0,%3,%4,%1.t
+ v<insn>.vi\t%0,%3,%4,%1.t
+ v<insn>.vx\t%0,%3,%4
+ v<insn>.vi\t%0,%3,%4
+ v<insn>.vx\t%0,%3,%4
+ v<insn>.vi\t%0,%3,%4"
+ [(set_attr "type" "vsarith")
+ (set_attr "mode" "<MODE>")])
+
+;; rv32 variant for 64-bit-element modes: the SImode scalar is sign-extended
+;; to the 64-bit element width before the splat.
+(define_insn "@v<optab><mode>_vx_32bit"
+ [(set (match_operand:V64BITI 0 "register_operand" "=vd,vd,vd,vd,vr,vr,vr,vr")
+ (unspec:V64BITI
+ [(unspec:V64BITI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+ (any_satplus:V64BITI
+ (match_operand:V64BITI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr")
+ (vec_duplicate:V64BITI
+ (sign_extend:<VSUB>
+ (match_operand:SI 4 "reg_or_simm5_operand" "r,Ws5,r,Ws5,r,Ws5,r,Ws5"))))
+ (match_operand:V64BITI 2 "vector_reg_or_const0_operand" "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+ (match_operand:SI 5 "csr_operand" "rK,rK,rK,rK,rK,rK,rK,rK")
+ (match_operand:SI 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ v<insn>.vx\t%0,%3,%4,%1.t
+ v<insn>.vi\t%0,%3,%4,%1.t
+ v<insn>.vx\t%0,%3,%4,%1.t
+ v<insn>.vi\t%0,%3,%4,%1.t
+ v<insn>.vx\t%0,%3,%4
+ v<insn>.vi\t%0,%3,%4
+ v<insn>.vx\t%0,%3,%4
+ v<insn>.vi\t%0,%3,%4"
+ [(set_attr "type" "vsarith")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Single-Width Saturating Sub.
+;; The output is built in C: when operand 4 matches Wn5 (negative simm5) the
+;; insn is rewritten as vsadd.vi with the negated constant, since vssub has no
+;; .vi form; otherwise vssub.vx is used.  The ",%1.t" mask suffix is appended
+;; unless the mask operand is const0 (constraint J = unmasked).
+;; NOTE(review): -INTVAL(operands[4]) must remain a valid simm5 for vsadd.vi,
+;; i.e. Wn5 should exclude -16 — confirm against the constraint definition.
+(define_insn "@vsssub<mode>_vx_internal"
+ [(set (match_operand:VI 0 "register_operand" "=vd,vd,vd,vd,vr,vr,vr,vr")
+ (unspec:VI
+ [(unspec:VI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+ (ss_minus:VI
+ (match_operand:VI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr")
+ (vec_duplicate:VI
+ (match_operand:<VSUB> 4 "reg_or_neg_simm5_operand" "r,Wn5,r,Wn5,r,Wn5,r,Wn5")))
+ (match_operand:VI 2 "vector_reg_or_const0_operand" "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK,rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ {
+ const char *tail = satisfies_constraint_J (operands[1]) ? "" : ",%1.t";
+ char buf[64] = {0};
+ if (satisfies_constraint_Wn5 (operands[4]))
+ {
+ const char *insn = "vsadd.vi\t%0,%3";
+ snprintf (buf, sizeof (buf), "%s,%d%s", insn, (int)(-INTVAL (operands[4])), tail);
+ }
+ else
+ {
+ const char *insn = "vssub.vx\t%0,%3,%4";
+ snprintf (buf, sizeof (buf), "%s%s", insn, tail);
+ }
+ output_asm_insn (buf, operands);
+ return "";
+ }
+ [(set_attr "type" "vsarith")
+ (set_attr "mode" "<MODE>")])
+
+;; Unsigned vector-scalar saturating subtract; register scalar only (no
+;; immediate rewrite is possible for the unsigned case).
+(define_insn "@vussub<mode>_vx_internal"
+ [(set (match_operand:VI 0 "register_operand" "=vd,vd,vr,vr")
+ (unspec:VI
+ [(unspec:VI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (us_minus:VI
+ (match_operand:VI 3 "register_operand" "vr,vr,vr,vr")
+ (vec_duplicate:VI
+ (match_operand:<VSUB> 4 "register_operand" "r,r,r,r")))
+ (match_operand:VI 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vssubu.vx\t%0,%3,%4,%1.t
+ vssubu.vx\t%0,%3,%4,%1.t
+ vssubu.vx\t%0,%3,%4
+ vssubu.vx\t%0,%3,%4"
+ [(set_attr "type" "vsarith")
+ (set_attr "mode" "<MODE>")])
+
+;; rv32 variants of the vector-scalar saturating subtracts for 64-bit-element
+;; modes: the SImode scalar is sign-extended to the element width.  Output
+;; logic mirrors @vsssub<mode>_vx_internal (Wn5 immediate -> vsadd.vi with the
+;; negated constant; see the NOTE there about the simm5 range).
+(define_insn "@vsssub<mode>_vx_32bit"
+ [(set (match_operand:V64BITI 0 "register_operand" "=vd,vd,vd,vd,vr,vr,vr,vr")
+ (unspec:V64BITI
+ [(unspec:V64BITI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+ (ss_minus:V64BITI
+ (match_operand:V64BITI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr")
+ (vec_duplicate:V64BITI
+ (sign_extend:<VSUB>
+ (match_operand:SI 4 "reg_or_neg_simm5_operand" "r,Wn5,r,Wn5,r,Wn5,r,Wn5"))))
+ (match_operand:V64BITI 2 "vector_reg_or_const0_operand" "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+ (match_operand:SI 5 "csr_operand" "rK,rK,rK,rK,rK,rK,rK,rK")
+ (match_operand:SI 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ {
+ const char *tail = satisfies_constraint_J (operands[1]) ? "" : ",%1.t";
+ char buf[64] = {0};
+ if (satisfies_constraint_Wn5 (operands[4]))
+ {
+ const char *insn = "vsadd.vi\t%0,%3";
+ snprintf (buf, sizeof (buf), "%s,%d%s", insn, (int)(-INTVAL (operands[4])), tail);
+ }
+ else
+ {
+ const char *insn = "vssub.vx\t%0,%3,%4";
+ snprintf (buf, sizeof (buf), "%s%s", insn, tail);
+ }
+ output_asm_insn (buf, operands);
+ return "";
+ }
+ [(set_attr "type" "vsarith")
+ (set_attr "mode" "<MODE>")])
+
+;; Unsigned rv32 variant; register scalar only.
+(define_insn "@vussub<mode>_vx_32bit"
+ [(set (match_operand:V64BITI 0 "register_operand" "=vd,vd,vr,vr")
+ (unspec:V64BITI
+ [(unspec:V64BITI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (us_minus:V64BITI
+ (match_operand:V64BITI 3 "register_operand" "vr,vr,vr,vr")
+ (vec_duplicate:V64BITI
+ (sign_extend:<VSUB>
+ (match_operand:SI 4 "register_operand" "r,r,r,r"))))
+ (match_operand:V64BITI 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand:SI 5 "csr_operand" "rK,rK,rK,rK")
+ (match_operand:SI 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vssubu.vx\t%0,%3,%4,%1.t
+ vssubu.vx\t%0,%3,%4,%1.t
+ vssubu.vx\t%0,%3,%4
+ vssubu.vx\t%0,%3,%4"
+ [(set_attr "type" "vsarith")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Single-Width Averaging Add and Subtract.
+;; Vector-Vector Single-Width Fractional Multiply with Rounding and Saturation.
+;; SAT_OP iterates over the averaging/fractional-multiply unspecs (vaadd,
+;; vasub, vsmul, ...); <sat_op> names both the pattern and the mnemonic.
+(define_insn "@v<sat_op><mode>_vv"
+ [(set (match_operand:VI 0 "register_operand" "=vd,vd,vr,vr")
+ (unspec:VI
+ [(unspec:VI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (unspec:VI
+ [(match_operand:VI 3 "register_operand" "vr,vr,vr,vr")
+ (match_operand:VI 4 "register_operand" "vr,vr,vr,vr")] SAT_OP)
+ (match_operand:VI 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ v<sat_op>.vv\t%0,%3,%4,%1.t
+ v<sat_op>.vv\t%0,%3,%4,%1.t
+ v<sat_op>.vv\t%0,%3,%4
+ v<sat_op>.vv\t%0,%3,%4"
+ [(set_attr "type" "<vsat>")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Single-Width Averaging Add and Subtract.
+;; Vector-Scalar Single-Width Fractional Multiply with Rounding and Saturation.
+;; Scalar operand 4 may be const0 (J), emitted as the x0/zero register.
+(define_insn "@v<sat_op><mode>_vx_internal"
+ [(set (match_operand:VI 0 "register_operand" "=vd,vd,vd,vd,vr,vr,vr,vr")
+ (unspec:VI
+ [(unspec:VI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+ (unspec:VI
+ [(match_operand:VI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr")
+ (vec_duplicate:VI
+ (match_operand:<VSUB> 4 "reg_or_0_operand" "r,J,r,J,r,J,r,J"))] SAT_OP)
+ (match_operand:VI 2 "vector_reg_or_const0_operand" "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK,rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ v<sat_op>.vx\t%0,%3,%4,%1.t
+ v<sat_op>.vx\t%0,%3,zero,%1.t
+ v<sat_op>.vx\t%0,%3,%4,%1.t
+ v<sat_op>.vx\t%0,%3,zero,%1.t
+ v<sat_op>.vx\t%0,%3,%4
+ v<sat_op>.vx\t%0,%3,zero
+ v<sat_op>.vx\t%0,%3,%4
+ v<sat_op>.vx\t%0,%3,zero"
+ [(set_attr "type" "<vsat>")
+ (set_attr "mode" "<MODE>")])
+
+;; rv32 variant for 64-bit-element modes: SImode scalar sign-extended to the
+;; element width before the splat.
+(define_insn "@v<sat_op><mode>_vx_32bit"
+ [(set (match_operand:V64BITI 0 "register_operand" "=vd,vd,vd,vd,vr,vr,vr,vr")
+ (unspec:V64BITI
+ [(unspec:V64BITI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+ (unspec:V64BITI
+ [(match_operand:V64BITI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr")
+ (vec_duplicate:V64BITI
+ (sign_extend:<VSUB>
+ (match_operand:SI 4 "reg_or_0_operand" "r,J,r,J,r,J,r,J")))] SAT_OP)
+ (match_operand:V64BITI 2 "vector_reg_or_const0_operand" "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+ (match_operand:SI 5 "csr_operand" "rK,rK,rK,rK,rK,rK,rK,rK")
+ (match_operand:SI 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ v<sat_op>.vx\t%0,%3,%4,%1.t
+ v<sat_op>.vx\t%0,%3,zero,%1.t
+ v<sat_op>.vx\t%0,%3,%4,%1.t
+ v<sat_op>.vx\t%0,%3,zero,%1.t
+ v<sat_op>.vx\t%0,%3,%4
+ v<sat_op>.vx\t%0,%3,zero
+ v<sat_op>.vx\t%0,%3,%4
+ v<sat_op>.vx\t%0,%3,zero"
+ [(set_attr "type" "<vsat>")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Single-Width Scaling Shift Instructions.
+;; SSHIFT iterates over vssrl/vssra.  Shift-amount operand 4 may be a vector
+;; register or a uimm5 vector constant (vk -> .vi form, printed via %v4).
+(define_insn "@v<sshift><mode>_vv"
+ [(set (match_operand:VI 0 "register_operand" "=vd,vd,vd,vd,vr,vr,vr,vr")
+ (unspec:VI
+ [(unspec:VI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+ (unspec:VI
+ [(match_operand:VI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr")
+ (match_operand:VI 4 "vector_shift_operand" "vr,vk,vr,vk,vr,vk,vr,vk")] SSHIFT)
+ (match_operand:VI 2 "vector_reg_or_const0_operand" "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK,rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ v<sshift>.vv\t%0,%3,%4,%1.t
+ v<sshift>.vi\t%0,%3,%v4,%1.t
+ v<sshift>.vv\t%0,%3,%4,%1.t
+ v<sshift>.vi\t%0,%3,%v4,%1.t
+ v<sshift>.vv\t%0,%3,%4
+ v<sshift>.vi\t%0,%3,%v4
+ v<sshift>.vv\t%0,%3,%4
+ v<sshift>.vi\t%0,%3,%v4"
+ [(set_attr "type" "vscaleshift")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Single-Width Scaling Shift Instructions.
+;; Shift amount is a GPR (.vx) or a scalar uimm5 (.vi).
+(define_insn "@v<sshift><mode>_vx"
+ [(set (match_operand:VI 0 "register_operand" "=vd,vd,vd,vd,vr,vr,vr,vr")
+ (unspec:VI
+ [(unspec:VI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+ (unspec:VI
+ [(match_operand:VI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr")
+ (match_operand 4 "p_reg_or_uimm5_operand" "r,K,r,K,r,K,r,K")] SSHIFT)
+ (match_operand:VI 2 "vector_reg_or_const0_operand" "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK,rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ v<sshift>.vx\t%0,%3,%4,%1.t
+ v<sshift>.vi\t%0,%3,%4,%1.t
+ v<sshift>.vx\t%0,%3,%4,%1.t
+ v<sshift>.vi\t%0,%3,%4,%1.t
+ v<sshift>.vx\t%0,%3,%4
+ v<sshift>.vi\t%0,%3,%4
+ v<sshift>.vx\t%0,%3,%4
+ v<sshift>.vi\t%0,%3,%4"
+ [(set_attr "type" "vscaleshift")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector signed/unsigned clip.
+;; Narrowing: source operand 3 is the wide mode <VW>, dest is VWI.  The many
+;; alternatives cover overlap cases (dest may tie to the wide source, to the
+;; shift vector, or must be early-clobbered when it overlaps neither).
+(define_insn "@vn<clip><mode>_wv"
+ [(set (match_operand:VWI 0 "register_operand" "=vd,vd,&vd,vd,&vd, vd,vd,&vd,vd,&vd, vr,vr,&vr,vr,&vr, vr,vr,&vr,vr,&vr")
+ (unspec:VWI
+ [(unspec:VWI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,vm, vm,vm,vm,vm,vm, J,J,J,J,J, J,J,J,J,J")
+ (unspec:VWI
+ [(match_operand:<VW> 3 "register_operand" "0,vr,vr,0,vr, 0,vr,vr,0,vr, 0,vr,vr,0,vr, 0,vr,vr,0,vr")
+ (match_operand:VWI 4 "vector_shift_operand" "vr,0,vr,vk,vk, vr,0,vr,vk,vk, vr,0,vr,vk,vk, vr,0,vr,vk,vk")] CLIP)
+ (match_operand:VWI 2 "vector_reg_or_const0_operand" "0,0,0,0,0, J,J,J,J,J, 0,0,0,0,0, J,J,J,J,J")
+ ] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK,rK, rK,rK,rK,rK,rK, rK,rK,rK,rK,rK, rK,rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vn<clip>.wv\t%0,%3,%4,%1.t
+ vn<clip>.wv\t%0,%3,%4,%1.t
+ vn<clip>.wv\t%0,%3,%4,%1.t
+ vn<clip>.wi\t%0,%3,%v4,%1.t
+ vn<clip>.wi\t%0,%3,%v4,%1.t
+ vn<clip>.wv\t%0,%3,%4,%1.t
+ vn<clip>.wv\t%0,%3,%4,%1.t
+ vn<clip>.wv\t%0,%3,%4,%1.t
+ vn<clip>.wi\t%0,%3,%v4,%1.t
+ vn<clip>.wi\t%0,%3,%v4,%1.t
+ vn<clip>.wv\t%0,%3,%4
+ vn<clip>.wv\t%0,%3,%4
+ vn<clip>.wv\t%0,%3,%4
+ vn<clip>.wi\t%0,%3,%v4
+ vn<clip>.wi\t%0,%3,%v4
+ vn<clip>.wv\t%0,%3,%4
+ vn<clip>.wv\t%0,%3,%4
+ vn<clip>.wv\t%0,%3,%4
+ vn<clip>.wi\t%0,%3,%v4
+ vn<clip>.wi\t%0,%3,%v4"
+ [(set_attr "type" "vclip")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar signed/unsigned clip (GPR or uimm5 shift amount).
+;; NOTE(review): the 14th alternative is "?&vr" while every parallel group
+;; uses plain "&vr" — confirm the '?' disparagement is intentional.
+(define_insn "@vn<clip><mode>_wx"
+ [(set (match_operand:VWI 0 "register_operand" "=vd,&vd,vd,&vd, vd,&vd,vd,&vd, vr,&vr,vr,&vr, vr,?&vr,vr,&vr")
+ (unspec:VWI
+ [(unspec:VWI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm, vm,vm,vm,vm, J,J,J,J, J,J,J,J")
+ (unspec:VWI
+ [(match_operand:<VW> 3 "register_operand" "0,vr,0,vr, 0,vr,0,vr, 0,vr,0,vr, 0,vr,0,vr")
+ (match_operand 4 "p_reg_or_uimm5_operand" "r,r,K,K, r,r,K,K, r,r,K,K, r,r,K,K")] CLIP)
+ (match_operand:VWI 2 "vector_reg_or_const0_operand" "0,0,0,0, J,J,J,J, 0,0,0,0, J,J,J,J")
+ ] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK, rK,rK,rK,rK, rK,rK,rK,rK, rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vn<clip>.wx\t%0,%3,%4,%1.t
+ vn<clip>.wx\t%0,%3,%4,%1.t
+ vn<clip>.wi\t%0,%3,%4,%1.t
+ vn<clip>.wi\t%0,%3,%4,%1.t
+ vn<clip>.wx\t%0,%3,%4,%1.t
+ vn<clip>.wx\t%0,%3,%4,%1.t
+ vn<clip>.wi\t%0,%3,%4,%1.t
+ vn<clip>.wi\t%0,%3,%4,%1.t
+ vn<clip>.wx\t%0,%3,%4
+ vn<clip>.wx\t%0,%3,%4
+ vn<clip>.wi\t%0,%3,%4
+ vn<clip>.wi\t%0,%3,%4
+ vn<clip>.wx\t%0,%3,%4
+ vn<clip>.wx\t%0,%3,%4
+ vn<clip>.wi\t%0,%3,%4
+ vn<clip>.wi\t%0,%3,%4"
+ [(set_attr "type" "vclip")
+ (set_attr "mode" "<MODE>")])
+
+;; -------------------------------------------------------------------------------
+;; ---- 13. Vector Floating-Point Arithmetic Instructions
+;; -------------------------------------------------------------------------------
+;; Includes:
+;; - 13.2 Vector Single-Width Floating-Point Add/Subtract Instructions
+;; - 13.3 Vector Widening Floating-Point Add/Subtract Instructions
+;; - 13.4 Vector Single-Width Floating-Point Multiply/Divide Instructions
+;; - 13.5 Vector Widening Floating-Point Multiply
+;; - 13.6 Vector Single-Width Floating-Point Fused Multiply-Add Instructions
+;; - 13.7 Vector Widening Floating-Point Fused Multiply-Add Instructions
+;; - 13.8 Vector Floating-Point Square-Root Instruction
+;; - 13.9 Vector Floating-Point Reciprocal Square-Root Estimate Instruction
+;; - 13.10 Vector Floating-Point Reciprocal Estimate Instruction
+;; - 13.11 Vector Floating-Point MIN/MAX Instructions
+;; - 13.12 Vector Floating-Point Sign-Injection Instructions
+;; - 13.13 Vector Floating-Point Compare Instructions
+;; - 13.14 Vector Floating-Point Classify Instruction
+;; - 13.15 Vector Floating-Point Merge Instruction
+;; - 13.16 Vector Floating-Point Move Instruction
+;; - 13.17 Single-Width Floating-Point/Integer Type-Convert Instructions
+;; - 13.18 Widening Floating-Point/Integer Type-Convert Instructions
+;; - 13.19 Narrowing Floating-Point/Integer Type-Convert Instructions
+;; -------------------------------------------------------------------------------
+
+;; Vector-Vector Single-Width Floating-Point Add/Subtract Instructions.
+;; Vector-Vector Single-Width Floating-Point Multiply/Divide Instructions.
+;; Vector-Vector Single-Width Floating-Point MIN/MAX Instructions.
+;; any_fop iterates over the FP binary rtx codes; <insn> is the mnemonic stem.
+(define_insn "@vf<optab><mode>_vv"
+ [(set (match_operand:VF 0 "register_operand" "=vd,vd,vr,vr")
+ (unspec:VF
+ [(unspec:VF
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (any_fop:VF
+ (match_operand:VF 3 "register_operand" "vr,vr,vr,vr")
+ (match_operand:VF 4 "register_operand" "vr,vr,vr,vr"))
+ (match_operand:VF 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vf<insn>.vv\t%0,%3,%4,%1.t
+ vf<insn>.vv\t%0,%3,%4,%1.t
+ vf<insn>.vv\t%0,%3,%4
+ vf<insn>.vv\t%0,%3,%4"
+ [(set_attr "type" "<rvv_type>")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Single-Width Floating-Point Add/Subtract Instructions.
+;; Vector-Scalar Single-Width Floating-Point Multiply/Divide Instructions.
+;; Vector-Scalar Single-Width Floating-Point MIN/MAX Instructions.
+;; Scalar operand 4 lives in an FP register (.vf form).
+(define_insn "@vf<optab><mode>_vf"
+ [(set (match_operand:VF 0 "register_operand" "=vd,vd,vr,vr")
+ (unspec:VF
+ [(unspec:VF
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (any_fop:VF
+ (match_operand:VF 3 "register_operand" "vr,vr,vr,vr")
+ (vec_duplicate:VF
+ (match_operand:<VSUB> 4 "register_operand" "f,f,f,f")))
+ (match_operand:VF 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vf<insn>.vf\t%0,%3,%4,%1.t
+ vf<insn>.vf\t%0,%3,%4,%1.t
+ vf<insn>.vf\t%0,%3,%4
+ vf<insn>.vf\t%0,%3,%4"
+ [(set_attr "type" "<rvv_type>")
+ (set_attr "mode" "<MODE>")])
+
+;; Floating-Point Reverse Sub/Div.
+;; vfrsub/vfrdiv: operands reversed relative to @vf<optab><mode>_vf — the
+;; scalar (operand 4) is the minuend/dividend, the vector (operand 3) the
+;; subtrahend/divisor, matching the vd = f[rs1] op vs2 semantics.
+(define_insn "@vfr<optab><mode>_vf"
+ [(set (match_operand:VF 0 "register_operand" "=vd,vd,vr,vr")
+ (unspec:VF
+ [(unspec:VF
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (minus_div:VF
+ (vec_duplicate:VF
+ (match_operand:<VSUB> 4 "register_operand" "f,f,f,f"))
+ (match_operand:VF 3 "register_operand" "vr,vr,vr,vr"))
+ (match_operand:VF 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfr<insn>.vf\t%0,%3,%4,%1.t
+ vfr<insn>.vf\t%0,%3,%4,%1.t
+ vfr<insn>.vf\t%0,%3,%4
+ vfr<insn>.vf\t%0,%3,%4"
+ [(set_attr "type" "varith")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Widening Float Add/Subtract.
+;; .vv form: both sources are the narrow mode VWF, extended to <VW>.  The
+;; destination is early-clobbered since it is wider than the sources.
+(define_insn "@vfw<plus_minus:optab><vw>_vv"
+ [(set (match_operand:<VW> 0 "register_operand" "=&vr,&vr,&vr,&vr")
+ (unspec:<VW>
+ [(unspec:<VW>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (plus_minus:<VW>
+ (float_extend:<VW>
+ (match_operand:VWF 3 "register_operand" "vr,vr,vr,vr"))
+ (float_extend:<VW>
+ (match_operand:VWF 4 "register_operand" "vr,vr,vr,vr")))
+ (match_operand:<VW> 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfw<insn>.vv\t%0,%3,%4,%1.t
+ vfw<insn>.vv\t%0,%3,%4,%1.t
+ vfw<insn>.vv\t%0,%3,%4
+ vfw<insn>.vv\t%0,%3,%4"
+ [(set_attr "type" "vwarith")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Widening Float Add/Subtract (.vf form: narrow FP scalar).
+(define_insn "@vfw<plus_minus:optab><vw>_vf"
+ [(set (match_operand:<VW> 0 "register_operand" "=&vr,&vr,&vr,&vr")
+ (unspec:<VW>
+ [(unspec:<VW>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (plus_minus:<VW>
+ (float_extend:<VW>
+ (match_operand:VWF 3 "register_operand" "vr,vr,vr,vr"))
+ (float_extend:<VW>
+ (vec_duplicate:VWF
+ (match_operand:<VSUB> 4 "register_operand" "f,f,f,f"))))
+ (match_operand:<VW> 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfw<insn>.vf\t%0,%3,%4,%1.t
+ vfw<insn>.vf\t%0,%3,%4,%1.t
+ vfw<insn>.vf\t%0,%3,%4
+ vfw<insn>.vf\t%0,%3,%4"
+ [(set_attr "type" "vwarith")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Widening Float Add/Subtract.
+;; .wv form: operand 3 is already the wide mode; only operand 4 is extended.
+(define_insn "@vfw<plus_minus:optab><vw>_wv"
+ [(set (match_operand:<VW> 0 "register_operand" "=&vr,&vr,&vr,&vr")
+ (unspec:<VW>
+ [(unspec:<VW>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (plus_minus:<VW>
+ (match_operand:<VW> 3 "register_operand" "vr,vr,vr,vr")
+ (float_extend:<VW>
+ (match_operand:VWF 4 "register_operand" "vr,vr,vr,vr")))
+ (match_operand:<VW> 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfw<insn>.wv\t%0,%3,%4,%1.t
+ vfw<insn>.wv\t%0,%3,%4,%1.t
+ vfw<insn>.wv\t%0,%3,%4
+ vfw<insn>.wv\t%0,%3,%4"
+ [(set_attr "type" "vwarith")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Widening Float Add/Subtract (.wf form: wide vector source,
+;; narrow FP scalar extended).
+(define_insn "@vfw<plus_minus:optab><vw>_wf"
+ [(set (match_operand:<VW> 0 "register_operand" "=&vr,&vr,&vr,&vr")
+ (unspec:<VW>
+ [(unspec:<VW>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (plus_minus:<VW>
+ (match_operand:<VW> 3 "register_operand" "vr,vr,vr,vr")
+ (float_extend:<VW>
+ (vec_duplicate:VWF
+ (match_operand:<VSUB> 4 "register_operand" "f,f,f,f"))))
+ (match_operand:<VW> 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfw<insn>.wf\t%0,%3,%4,%1.t
+ vfw<insn>.wf\t%0,%3,%4,%1.t
+ vfw<insn>.wf\t%0,%3,%4
+ vfw<insn>.wf\t%0,%3,%4"
+ [(set_attr "type" "vwarith")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Widening Float multiply.
+;; Both narrow sources are extended to <VW>; dest is early-clobbered (wider
+;; than the sources).
+(define_insn "@vfwmul<vw>_vv"
+ [(set (match_operand:<VW> 0 "register_operand" "=&vr,&vr,&vr,&vr")
+ (unspec:<VW>
+ [(unspec:<VW>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (mult:<VW>
+ (float_extend:<VW>
+ (match_operand:VWF 3 "register_operand" "vr,vr,vr,vr"))
+ (float_extend:<VW>
+ (match_operand:VWF 4 "register_operand" "vr,vr,vr,vr")))
+ (match_operand:<VW> 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfwmul.vv\t%0,%3,%4,%1.t
+ vfwmul.vv\t%0,%3,%4,%1.t
+ vfwmul.vv\t%0,%3,%4
+ vfwmul.vv\t%0,%3,%4"
+ [(set_attr "type" "vwarith")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Widening Float multiply (.vf: narrow FP scalar splat).
+(define_insn "@vfwmul<vw>_vf"
+ [(set (match_operand:<VW> 0 "register_operand" "=&vr,&vr,&vr,&vr")
+ (unspec:<VW>
+ [(unspec:<VW>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (mult:<VW>
+ (float_extend:<VW>
+ (match_operand:VWF 3 "register_operand" "vr,vr,vr,vr"))
+ (float_extend:<VW>
+ (vec_duplicate:VWF
+ (match_operand:<VSUB> 4 "register_operand" "f,f,f,f"))))
+ (match_operand:<VW> 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfwmul.vf\t%0,%3,%4,%1.t
+ vfwmul.vf\t%0,%3,%4,%1.t
+ vfwmul.vf\t%0,%3,%4
+ vfwmul.vf\t%0,%3,%4"
+ [(set_attr "type" "vwarith")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Single-Width Floating-Point Fused Multiply-Add Instructions.
+;; FMAC iterates over the macc/nmacc/msac/nmsac/madd/... unspecs.  Operand 2
+;; is both an input (accumulator/multiplicand, tied to the dest) and the
+;; merge value for masked-off elements (match_dup 2).
+(define_insn "@vf<fmac><mode>_vv"
+ [(set (match_operand:VF 0 "register_operand" "=vd,vr")
+ (unspec:VF
+ [(unspec:VF
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,J")
+ (unspec:VF
+ [(match_operand:VF 2 "register_operand" "0,0")
+ (match_operand:VF 3 "register_operand" "vr,vr")
+ (match_operand:VF 4 "register_operand" "vr,vr")] FMAC)
+ (match_dup 2)] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vf<fmac>.vv\t%0,%3,%4,%1.t
+ vf<fmac>.vv\t%0,%3,%4"
+ [(set_attr "type" "vmadd")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Single-Width Floating-Point Fused Multiply-Add Instructions.
+;; Same as above with operand 3 an FP-register scalar splat (.vf form).
+(define_insn "@vf<fmac><mode>_vf"
+ [(set (match_operand:VF 0 "register_operand" "=vd,vr")
+ (unspec:VF
+ [(unspec:VF
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,J")
+ (unspec:VF
+ [(match_operand:VF 2 "register_operand" "0,0")
+ (vec_duplicate:VF
+ (match_operand:<VSUB> 3 "register_operand" "f,f"))
+ (match_operand:VF 4 "register_operand" "vr,vr")] FMAC)
+ (match_dup 2)] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vf<fmac>.vf\t%0,%3,%4,%1.t
+ vf<fmac>.vf\t%0,%3,%4"
+ [(set_attr "type" "vmadd")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Widening multiply-accumulate, overwrite addend.
+;; Vector-Vector Widening multiply-subtract-accumulate, overwrite addend.
+(define_insn "@vfwmacc<vw>_vv"
+ [(set (match_operand:<VW> 0 "register_operand" "=&vr,&vr")
+ (unspec:<VW>
+ [(unspec:<VW>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,J")
+ (plus:<VW>
+ (mult:<VW>
+ (float_extend:<VW>
+ (match_operand:VWF 3 "register_operand" "vr,vr"))
+ (float_extend:<VW>
+ (match_operand:VWF 4 "register_operand" "vr,vr")))
+ (match_operand:<VW> 2 "register_operand" "0,0"))
+ (match_dup 2)] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfwmacc.vv\t%0,%3,%4,%1.t
+ vfwmacc.vv\t%0,%3,%4"
+ [(set_attr "type" "vwmadd")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@vfwmsac<vw>_vv"
+ [(set (match_operand:<VW> 0 "register_operand" "=&vr,&vr")
+ (unspec:<VW>
+ [(unspec:<VW>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,J")
+ (minus:<VW>
+ (mult:<VW>
+ (float_extend:<VW>
+ (match_operand:VWF 3 "register_operand" "vr,vr"))
+ (float_extend:<VW>
+ (match_operand:VWF 4 "register_operand" "vr,vr")))
+ (match_operand:<VW> 2 "register_operand" "0,0"))
+ (match_dup 2)] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfwmsac.vv\t%0,%3,%4,%1.t
+ vfwmsac.vv\t%0,%3,%4"
+ [(set_attr "type" "vwmadd")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Widening multiply-accumulate, overwrite addend.
+;; Vector-Scalar Widening multiply-subtract-accumulate, overwrite addend.
+;; vd = +(f[rs1] * vs2) + vd, widening; op3 = scalar FP multiplicand
+;; (duplicated then float-extended), op4 = vector multiplicand, op2 = addend
+;; tied to dest, op1 = mask (J = unmasked), op5 = AVL, op6 = policy.
+(define_insn "@vfwmacc<vw>_vf"
+ [(set (match_operand:<VW> 0 "register_operand" "=&vr,&vr")
+ (unspec:<VW>
+ [(unspec:<VW>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,J")
+ (plus:<VW>
+ (mult:<VW>
+ (float_extend:<VW>
+ (match_operand:VWF 4 "register_operand" "vr,vr"))
+ (float_extend:<VW>
+ (vec_duplicate:VWF
+ (match_operand:<VSUB> 3 "register_operand" "f,f"))))
+ (match_operand:<VW> 2 "register_operand" "0,0"))
+ (match_dup 2)] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfwmacc.vf\t%0,%3,%4,%1.t
+ vfwmacc.vf\t%0,%3,%4"
+ [(set_attr "type" "vwmadd")
+ (set_attr "mode" "<MODE>")])
+
+;; vd = +(f[rs1] * vs2) - vd, widening; op3 = scalar FP multiplicand,
+;; op4 = vector multiplicand, op2 = subtrahend tied to dest.
+(define_insn "@vfwmsac<vw>_vf"
+ [(set (match_operand:<VW> 0 "register_operand" "=&vr,&vr")
+ (unspec:<VW>
+ [(unspec:<VW>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,J")
+ (minus:<VW>
+ (mult:<VW>
+ (float_extend:<VW>
+ (match_operand:VWF 4 "register_operand" "vr,vr"))
+ (float_extend:<VW>
+ (vec_duplicate:VWF
+ (match_operand:<VSUB> 3 "register_operand" "f,f"))))
+ (match_operand:<VW> 2 "register_operand" "0,0"))
+ (match_dup 2)] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfwmsac.vf\t%0,%3,%4,%1.t
+ vfwmsac.vf\t%0,%3,%4"
+ [(set_attr "type" "vwmadd")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Widening negate-(multiply-accumulate), overwrite addend.
+;; Vector-Vector Widening negate-(multiply-subtract-accumulate), overwrite addend.
+;; vd = -(vs1 * vs2) - vd, widening: neg of (product + tied op2).
+;; op1 = mask (J = unmasked), op5 = AVL, op6 = policy.
+(define_insn "@vfwnmacc<vw>_vv"
+ [(set (match_operand:<VW> 0 "register_operand" "=&vr,&vr")
+ (unspec:<VW>
+ [(unspec:<VW>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,J")
+ (neg:<VW>
+ (plus:<VW>
+ (mult:<VW>
+ (float_extend:<VW>
+ (match_operand:VWF 4 "register_operand" "vr,vr"))
+ (float_extend:<VW>
+ (match_operand:VWF 3 "register_operand" "vr,vr")))
+ (match_operand:<VW> 2 "register_operand" "0,0")))
+ (match_dup 2)] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfwnmacc.vv\t%0,%3,%4,%1.t
+ vfwnmacc.vv\t%0,%3,%4"
+ [(set_attr "type" "vwmadd")
+ (set_attr "mode" "<MODE>")])
+
+;; vd = -(vs1 * vs2) + vd, widening: neg of (product - tied op2).
+(define_insn "@vfwnmsac<vw>_vv"
+ [(set (match_operand:<VW> 0 "register_operand" "=&vr,&vr")
+ (unspec:<VW>
+ [(unspec:<VW>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,J")
+ (neg:<VW>
+ (minus:<VW>
+ (mult:<VW>
+ (float_extend:<VW>
+ (match_operand:VWF 4 "register_operand" "vr,vr"))
+ (float_extend:<VW>
+ (match_operand:VWF 3 "register_operand" "vr,vr")))
+ (match_operand:<VW> 2 "register_operand" "0,0")))
+ (match_dup 2)] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfwnmsac.vv\t%0,%3,%4,%1.t
+ vfwnmsac.vv\t%0,%3,%4"
+ [(set_attr "type" "vwmadd")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Widening negate-(multiply-accumulate), overwrite addend.
+;; Vector-Scalar Widening negate-(multiply-subtract-accumulate), overwrite addend.
+;; vd = -(f[rs1] * vs2) - vd, widening; op3 = scalar FP multiplicand,
+;; op4 = vector multiplicand, op2 = accumulator tied to dest.
+(define_insn "@vfwnmacc<vw>_vf"
+ [(set (match_operand:<VW> 0 "register_operand" "=&vr,&vr")
+ (unspec:<VW>
+ [(unspec:<VW>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,J")
+ (neg:<VW>
+ (plus:<VW>
+ (mult:<VW>
+ (float_extend:<VW>
+ (match_operand:VWF 4 "register_operand" "vr,vr"))
+ (float_extend:<VW>
+ (vec_duplicate:VWF
+ (match_operand:<VSUB> 3 "register_operand" "f,f"))))
+ (match_operand:<VW> 2 "register_operand" "0,0")))
+ (match_dup 2)] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfwnmacc.vf\t%0,%3,%4,%1.t
+ vfwnmacc.vf\t%0,%3,%4"
+ [(set_attr "type" "vwmadd")
+ (set_attr "mode" "<MODE>")])
+
+;; vd = -(f[rs1] * vs2) + vd, widening; op3 = scalar FP multiplicand,
+;; op4 = vector multiplicand, op2 = accumulator tied to dest.
+(define_insn "@vfwnmsac<vw>_vf"
+ [(set (match_operand:<VW> 0 "register_operand" "=&vr,&vr")
+ (unspec:<VW>
+ [(unspec:<VW>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,J")
+ (neg:<VW>
+ (minus:<VW>
+ (mult:<VW>
+ (float_extend:<VW>
+ (match_operand:VWF 4 "register_operand" "vr,vr"))
+ (float_extend:<VW>
+ (vec_duplicate:VWF
+ (match_operand:<VSUB> 3 "register_operand" "f,f"))))
+ (match_operand:<VW> 2 "register_operand" "0,0")))
+ (match_dup 2)] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfwnmsac.vf\t%0,%3,%4,%1.t
+ vfwnmsac.vf\t%0,%3,%4"
+ [(set_attr "type" "vwmadd")
+ (set_attr "mode" "<MODE>")])
+
+;; Floating-Point square root.
+;; Elementwise FP square root.  op1 = mask (J = unmasked), op2 = merge
+;; (0 = tied dest, J = none), op3 = source, op4 = AVL, op5 = policy.
+;; vd alternatives pair masked forms; vr alternatives pair unmasked forms.
+(define_insn "@vfsqrt<mode>_v"
+ [(set (match_operand:VF 0 "register_operand" "=vd,vd,vr,vr")
+ (unspec:VF
+ [(unspec:VF
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (sqrt:VF
+ (match_operand:VF 3 "register_operand" "vr,vr,vr,vr"))
+ (match_operand:VF 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 4 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 5 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfsqrt.v\t%0,%3,%1.t
+ vfsqrt.v\t%0,%3,%1.t
+ vfsqrt.v\t%0,%3
+ vfsqrt.v\t%0,%3"
+ [(set_attr "type" "vfsqrt")
+ (set_attr "mode" "<MODE>")])
+
+;; Floating-Point Reciprocal Square-Root Estimate.
+;; Floating-Point Reciprocal Estimate.
+;; Reciprocal / reciprocal-sqrt estimate, selected by the RECIPROCAL
+;; unspec iterator.  Same operand layout as vfsqrt above.
+(define_insn "@vf<reciprocal><mode>_v"
+ [(set (match_operand:VF 0 "register_operand" "=vd,vd,vr,vr")
+ (unspec:VF
+ [(unspec:VF
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (unspec:VF
+ [(match_operand:VF 3 "register_operand" "vr,vr,vr,vr")] RECIPROCAL)
+ (match_operand:VF 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 4 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 5 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vf<reciprocal>.v\t%0,%3,%1.t
+ vf<reciprocal>.v\t%0,%3,%1.t
+ vf<reciprocal>.v\t%0,%3
+ vf<reciprocal>.v\t%0,%3"
+ [(set_attr "type" "vdiv")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Floating-Point Sign-Injection.
+;; Sign-injection family (vfsgnj/vfsgnjn/vfsgnjx via COPYSIGNS iterator):
+;; op3 supplies magnitude, op4 supplies the (possibly negated/xored) sign.
+(define_insn "@vfsgnj<nx><mode>_vv"
+ [(set (match_operand:VF 0 "register_operand" "=vd,vd,vr,vr")
+ (unspec:VF
+ [(unspec:VF
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (unspec:VF
+ [(match_operand:VF 3 "register_operand" "vr,vr,vr,vr")
+ (match_operand:VF 4 "register_operand" "vr,vr,vr,vr")] COPYSIGNS)
+ (match_operand:VF 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfsgnj<nx>.vv\t%0,%3,%4,%1.t
+ vfsgnj<nx>.vv\t%0,%3,%4,%1.t
+ vfsgnj<nx>.vv\t%0,%3,%4
+ vfsgnj<nx>.vv\t%0,%3,%4"
+ [(set_attr "type" "vfsgnj")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Floating-Point Sign-Injection.
+;; Vector-scalar sign-injection: op4 is a scalar FP register broadcast as
+;; the sign source; op3 supplies the magnitude.
+(define_insn "@vfsgnj<nx><mode>_vf"
+ [(set (match_operand:VF 0 "register_operand" "=vd,vd,vr,vr")
+ (unspec:VF
+ [(unspec:VF
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (unspec:VF
+ [(match_operand:VF 3 "register_operand" "vr,vr,vr,vr")
+ (vec_duplicate:VF
+ (match_operand:<VSUB> 4 "register_operand" "f,f,f,f"))] COPYSIGNS)
+ (match_operand:VF 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfsgnj<nx>.vf\t%0,%3,%4,%1.t
+ vfsgnj<nx>.vf\t%0,%3,%4,%1.t
+ vfsgnj<nx>.vf\t%0,%3,%4
+ vfsgnj<nx>.vf\t%0,%3,%4"
+ [(set_attr "type" "vfsgnj")
+ (set_attr "mode" "<MODE>")])
+
+;; vfneg.v vd,vs = vfsgnjn.vv vd,vs,vs.
+;; FP negate, expressed directly as (neg ...) in RTL; the assembler's
+;; vfneg.v pseudo expands to vfsgnjn.vv vd,vs,vs.
+(define_insn "@vfneg<mode>_v"
+ [(set (match_operand:VF 0 "register_operand" "=vd,vd,vr,vr")
+ (unspec:VF
+ [(unspec:VF
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (neg:VF
+ (match_operand:VF 3 "register_operand" "vr,vr,vr,vr"))
+ (match_operand:VF 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 4 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 5 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfneg.v\t%0,%3,%1.t
+ vfneg.v\t%0,%3,%1.t
+ vfneg.v\t%0,%3
+ vfneg.v\t%0,%3"
+ [(set_attr "type" "vfsgnj")
+ (set_attr "mode" "<MODE>")])
+
+;; vfabs.v vd,vs = vfsgnjx.vv vd,vs,vs.
+;; FP absolute value, expressed directly as (abs ...) in RTL; the
+;; assembler's vfabs.v pseudo expands to vfsgnjx.vv vd,vs,vs.
+(define_insn "@vfabs<mode>_v"
+ [(set (match_operand:VF 0 "register_operand" "=vd,vd,vr,vr")
+ (unspec:VF
+ [(unspec:VF
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (abs:VF
+ (match_operand:VF 3 "register_operand" "vr,vr,vr,vr"))
+ (match_operand:VF 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 4 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 5 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfabs.v\t%0,%3,%1.t
+ vfabs.v\t%0,%3,%1.t
+ vfabs.v\t%0,%3
+ vfabs.v\t%0,%3"
+ [(set_attr "type" "vfsgnj")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Floating-Point Compare Instructions.
+;; Mask-producing FP compare (any_fcmp iterator).  11 alternatives:
+;; 8 masked (mask op1 = vm or tied 0) x {merge tied, merge J}, 3 unmasked.
+;; Alternatives allow dest to overlap a source ("0") or force a fresh
+;; register ("&vr") when dest must not overlap mask/sources.
+(define_insn "@vmf<optab><mode>_vv"
+ [(set (match_operand:<VM> 0 "register_operand" "=vr,vr,vm,&vr, vr,vr,vm,&vr, vr,vr,&vr")
+ (unspec:<VM>
+ [(unspec:<VM>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,0,vm, vm,vm,0,vm, J,J,J")
+ (any_fcmp:<VM>
+ (match_operand:VF 3 "register_operand" "0,vr,vr,vr, 0,vr,vr,vr, 0,vr,vr")
+ (match_operand:VF 4 "register_operand" "vr,0,vr,vr, vr,0,vr,vr, vr,0,vr"))
+ (match_operand:<VM> 2 "vector_reg_or_const0_operand" "0,0,0,0, J,J,J,J, J,J,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK, rK,rK,rK,rK, rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vmf<insn>.vv\t%0,%3,%4,%1.t
+ vmf<insn>.vv\t%0,%3,%4,%1.t
+ vmf<insn>.vv\t%0,%3,%4,%1.t
+ vmf<insn>.vv\t%0,%3,%4,%1.t
+ vmf<insn>.vv\t%0,%3,%4,%1.t
+ vmf<insn>.vv\t%0,%3,%4,%1.t
+ vmf<insn>.vv\t%0,%3,%4,%1.t
+ vmf<insn>.vv\t%0,%3,%4,%1.t
+ vmf<insn>.vv\t%0,%3,%4
+ vmf<insn>.vv\t%0,%3,%4
+ vmf<insn>.vv\t%0,%3,%4"
+ [(set_attr "type" "vcmp")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Floating-Point Compare Instructions.
+;; Mask-producing vector-scalar FP compare; op4 is a broadcast scalar.
+;; 8 alternatives: 6 masked x {merge tied, merge J}, 2 unmasked.
+(define_insn "@vmf<optab><mode>_vf"
+ [(set (match_operand:<VM> 0 "register_operand" "=vr,vm,&vr, vr,vm,&vr, vr,&vr")
+ (unspec:<VM>
+ [(unspec:<VM>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,0,vm, vm,0,vm, J,J")
+ (any_fcmp:<VM>
+ (match_operand:VF 3 "register_operand" "0,vr,vr, 0,vr,vr, 0,vr")
+ (vec_duplicate:VF
+ (match_operand:<VSUB> 4 "register_operand" "f,f,f, f,f,f, f,f")))
+ (match_operand:<VM> 2 "vector_reg_or_const0_operand" "0,0,0, J,J,J, J,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK, rK,rK,rK, rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vmf<insn>.vf\t%0,%3,%4,%1.t
+ vmf<insn>.vf\t%0,%3,%4,%1.t
+ vmf<insn>.vf\t%0,%3,%4,%1.t
+ vmf<insn>.vf\t%0,%3,%4,%1.t
+ vmf<insn>.vf\t%0,%3,%4,%1.t
+ vmf<insn>.vf\t%0,%3,%4,%1.t
+ vmf<insn>.vf\t%0,%3,%4
+ vmf<insn>.vf\t%0,%3,%4"
+ [(set_attr "type" "vcmp")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Floating-Point Comparison with no trapping.
+;; These are used by auto-vectorization.
+;; Expand non-trapping FP comparisons (ltgt, ordered, unordered, un*) out
+;; of trapping primitives:
+;;   ltgt(a,b)      = gt(a,b) | gt(b,a)
+;;   ordered(a,b)   = eq(a,a) & eq(b,b)          (NaN compares false to itself)
+;;   unordered(a,b) = ~ordered(a,b)
+;;   un<cmp>(a,b)   = <cmp> under ordered mask, all-ones where unordered
+(define_expand "@vmf<optab><mode>_vv"
+ [(set (match_operand:<VM> 0 "register_operand")
+ (unspec:<VM>
+ [(unspec:<VM>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand")
+ (any_fcmp_no_trapping:<VM>
+ (match_operand:VF 3 "register_operand")
+ (match_operand:VF 4 "register_operand"))
+ (match_operand:<VM> 2 "vector_reg_or_const0_operand")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand")
+ (match_operand 6 "const_int_operand")] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+{
+ rtx mask = gen_reg_rtx (<VM>mode);
+ if (strcmp ("<optab>", "ltgt") == 0)
+ {
+ /* ltgt: a > b in operands[0], b > a in MASK, then OR them.  */
+ emit_insn (gen_vmf_vv (GT, <MODE>mode, operands[0],
+ operands[1], operands[2], operands[3], operands[4],
+ operands[5], operands[6]));
+ emit_insn (gen_vmf_vv (GT, <MODE>mode, mask,
+ operands[1], operands[2], operands[4], operands[3],
+ operands[5], operands[6]));
+ emit_insn (gen_vm_mm (IOR, <VM>mode, operands[0], operands[0], mask,
+ operands[5], operands[6]));
+ }
+ else
+ {
+ /* Example of implementing isgreater()
+ vmfeq.vv v0, va, va ;; Only set where A is not NaN.
+ vmfeq.vv v1, vb, vb ;; Only set where B is not NaN.
+ vmand.mm v0, v0, v1 ;; Only set where A and B are ordered,
+ vmfgt.vv v0, va, vb, v0.t ;; so only set flags on ordered values. */
+ emit_insn (gen_vmf_vv (EQ, <MODE>mode, operands[0],
+ operands[1], operands[2], operands[3], operands[3],
+ operands[5], operands[6]));
+ emit_insn (gen_vmf_vv (EQ, <MODE>mode, mask,
+ operands[1], operands[2], operands[4], operands[4],
+ operands[5], operands[6]));
+ emit_insn (gen_vm_mm (AND, <VM>mode, operands[0], operands[0], mask,
+ operands[5], operands[6]));
+
+ /* operands[0] now holds the "both ordered" mask; done for "ordered".  */
+ rtx all_ones = gen_reg_rtx (<VM>mode);
+ emit_insn (gen_vmset<vm>_m (all_ones, operands[5],
+ rvv_gen_policy ()));
+
+ if (strcmp ("<optab>", "ordered") != 0)
+ {
+ if (strcmp ("<optab>", "unordered") == 0)
+ emit_insn (gen_vmnot_m (<VM>mode, operands[0], operands[0], operands[5], operands[6]));
+ else
+ {
+ /* un<cmp>: compare under the ordered mask, merging ALL_ONES so
+ unordered lanes read as true.  EQ is the fallback for "uneq".  */
+ enum rtx_code code = strcmp ("<optab>", "unlt") == 0 ? LT :
+ strcmp ("<optab>", "unle") == 0 ? LE :
+ strcmp ("<optab>", "unge") == 0 ? GE :
+ strcmp ("<optab>", "ungt") == 0 ? GT : EQ;
+ emit_insn (gen_vmf_vv (code, <MODE>mode, operands[0],
+ operands[0], all_ones, operands[3], operands[4],
+ operands[5], operands[6]));
+ }
+ }
+ }
+ DONE;
+})
+
+;; Floating-Point Classify Instruction.
+;; Classify FP elements into the integer mode mapped by <VMAP>.
+;; op1 = mask, op2 = merge, op3 = source, op4 = AVL, op5 = policy.
+(define_insn "@vfclass<vmap>_v"
+ [(set (match_operand:<VMAP> 0 "register_operand" "=vd,vd,vr,vr")
+ (unspec:<VMAP>
+ [(unspec:<VMAP>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (unspec:<VMAP>
+ [(match_operand:VF 3 "register_operand" "vr,vr,vr,vr")] UNSPEC_FCLASS)
+ (match_operand:<VMAP> 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 4 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 5 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfclass.v\t%0,%3,%1.t
+ vfclass.v\t%0,%3,%1.t
+ vfclass.v\t%0,%3
+ vfclass.v\t%0,%3"
+ [(set_attr "type" "vfclass")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Floating-Point merge.
+;; Per-lane select: where mask op1 is set take broadcast scalar op4,
+;; otherwise vector op3.  op2 = merge (tied dest or J), always masked.
+(define_insn "@vfmerge<mode>_vfm"
+ [(set (match_operand:VF 0 "register_operand" "=vd,vd")
+ (unspec:VF
+ [(match_operand:VF 2 "vector_reg_or_const0_operand" "0,J")
+ (unspec:VF
+ [(match_operand:<VM> 1 "register_operand" "vm,vm")
+ (match_operand:VF 3 "register_operand" "vr,vr")
+ (vec_duplicate:VF
+ (match_operand:<VSUB> 4 "register_operand" "f,f"))] UNSPEC_MERGE)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfmerge.vfm\t%0,%3,%4,%1
+ vfmerge.vfm\t%0,%3,%4,%1"
+ [(set_attr "type" "vmerge")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Floating-Point Move.
+;; Broadcast scalar FP register op2 to all elements.  op1 = merge
+;; (tied dest or J), op3 = AVL, op4 = policy.  Never masked.
+(define_insn "@vfmv<mode>_v_f"
+ [(set (match_operand:VF 0 "register_operand" "=vr,vr")
+ (unspec:VF
+ [(match_operand:VF 1 "vector_reg_or_const0_operand" "0,J")
+ (vec_duplicate:VF
+ (match_operand:<VSUB> 2 "register_operand" "f,f"))
+ (match_operand 3 "p_reg_or_const_csr_operand" "rK,rK")
+ (match_operand 4 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "vfmv.v.f\t%0,%2"
+ [(set_attr "type" "vmove")
+ (set_attr "mode" "<MODE>")])
+
+;; Convert float to unsigned integer.
+;; Convert float to signed integer.
+;; Float -> same-width signed/unsigned integer (FCVT unspec iterator
+;; selects signedness); rounds per dynamic rounding mode.
+(define_insn "@vfcvt<vmap>_x<fu>_f_v"
+ [(set (match_operand:<VMAP> 0 "register_operand" "=vd,vd,vr,vr")
+ (unspec:<VMAP>
+ [(unspec:<VMAP>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (unspec:<VMAP>
+ [(match_operand:VF 3 "register_operand" "vr,vr,vr,vr")] FCVT)
+ (match_operand:<VMAP> 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 4 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 5 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfcvt.x<fu>.f.v\t%0,%3,%1.t
+ vfcvt.x<fu>.f.v\t%0,%3,%1.t
+ vfcvt.x<fu>.f.v\t%0,%3
+ vfcvt.x<fu>.f.v\t%0,%3"
+ [(set_attr "type" "vfcvt")
+ (set_attr "mode" "<MODE>")])
+
+;; Convert float to unsigned integer, truncating.
+;; Convert float to signed integer, truncating.
+;; Float -> integer, truncating (round towards zero); expressed with
+;; RTL any_fix so the compiler can use it for plain C casts.
+(define_insn "@vfcvt<vmap>_rtz_x<u>_f_v"
+ [(set (match_operand:<VMAP> 0 "register_operand" "=vd,vd,vr,vr")
+ (unspec:<VMAP>
+ [(unspec:<VMAP>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (any_fix:<VMAP>
+ (match_operand:VF 3 "register_operand" "vr,vr,vr,vr"))
+ (match_operand:<VMAP> 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 4 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 5 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfcvt.rtz.x<u>.f.v\t%0,%3,%1.t
+ vfcvt.rtz.x<u>.f.v\t%0,%3,%1.t
+ vfcvt.rtz.x<u>.f.v\t%0,%3
+ vfcvt.rtz.x<u>.f.v\t%0,%3"
+ [(set_attr "type" "vfcvt")
+ (set_attr "mode" "<MODE>")])
+
+;; Convert unsigned integer to float.
+;; Convert signed integer to float.
+;; Same-width integer -> float (any_float covers signed and unsigned).
+(define_insn "@vfcvt<mode>_f_x<u>_v"
+ [(set (match_operand:VF 0 "register_operand" "=vd,vd,vr,vr")
+ (unspec:VF
+ [(unspec:VF
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (any_float:VF
+ (match_operand:<VMAP> 3 "register_operand" "vr,vr,vr,vr"))
+ (match_operand:VF 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 4 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 5 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfcvt.f.x<u>.v\t%0,%3,%1.t
+ vfcvt.f.x<u>.v\t%0,%3,%1.t
+ vfcvt.f.x<u>.v\t%0,%3
+ vfcvt.f.x<u>.v\t%0,%3"
+ [(set_attr "type" "vfcvt")
+ (set_attr "mode" "<MODE>")])
+
+;; Convert float to double-width unsigned integer.
+;; Convert float to double-width signed integer.
+;; Float -> double-width integer; earlyclobber dest (wide result must
+;; not overlap the narrow source).
+(define_insn "@vfwcvt<vwmap>_x<fu>_f_v"
+ [(set (match_operand:<VWMAP> 0 "register_operand" "=&vr,&vr,&vr,&vr")
+ (unspec:<VWMAP>
+ [(unspec:<VWMAP>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (unspec:<VWMAP>
+ [(match_operand:VWF 3 "register_operand" "vr,vr,vr,vr")] FCVT)
+ (match_operand:<VWMAP> 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 4 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 5 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfwcvt.x<fu>.f.v\t%0,%3,%1.t
+ vfwcvt.x<fu>.f.v\t%0,%3,%1.t
+ vfwcvt.x<fu>.f.v\t%0,%3
+ vfwcvt.x<fu>.f.v\t%0,%3"
+ [(set_attr "type" "vfwcvt")
+ (set_attr "mode" "<MODE>")])
+
+;; Convert float to double-width unsigned integer, truncating.
+;; Convert float to double-width signed integer, truncating.
+;; Float -> double-width integer, truncating (round towards zero).
+(define_insn "@vfwcvt<vwmap>_rtz_x<u>_f_v"
+ [(set (match_operand:<VWMAP> 0 "register_operand" "=&vr,&vr,&vr,&vr")
+ (unspec:<VWMAP>
+ [(unspec:<VWMAP>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (any_fix:<VWMAP>
+ (match_operand:VWF 3 "register_operand" "vr,vr,vr,vr"))
+ (match_operand:<VWMAP> 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 4 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 5 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfwcvt.rtz.x<u>.f.v\t%0,%3,%1.t
+ vfwcvt.rtz.x<u>.f.v\t%0,%3,%1.t
+ vfwcvt.rtz.x<u>.f.v\t%0,%3
+ vfwcvt.rtz.x<u>.f.v\t%0,%3"
+ [(set_attr "type" "vfwcvt")
+ (set_attr "mode" "<MODE>")])
+
+;; Convert unsigned integer to double-width float.
+;; Convert signed integer to double-width float.
+;; Integer -> double-width float; source iterator excludes QI-element
+;; modes (VWINOQI) since there is no half-width FP type for them.
+(define_insn "@vfwcvt<vwfmap>_f_x<u>_v"
+ [(set (match_operand:<VWFMAP> 0 "register_operand" "=&vr,&vr,&vr,&vr")
+ (unspec:<VWFMAP>
+ [(unspec:<VWFMAP>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (any_float:<VWFMAP>
+ (match_operand:VWINOQI 3 "register_operand" "vr,vr,vr,vr"))
+ (match_operand:<VWFMAP> 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 4 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 5 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfwcvt.f.x<u>.v\t%0,%3,%1.t
+ vfwcvt.f.x<u>.v\t%0,%3,%1.t
+ vfwcvt.f.x<u>.v\t%0,%3
+ vfwcvt.f.x<u>.v\t%0,%3"
+ [(set_attr "type" "vfwcvt")
+ (set_attr "mode" "<MODE>")])
+
+;; Convert single-width float to double-width float
+;; Float -> double-width float (exact widening; no rounding needed).
+(define_insn "@vfwcvt<vw>_f_f_v"
+ [(set (match_operand:<VW> 0 "register_operand" "=&vr,&vr,&vr,&vr")
+ (unspec:<VW>
+ [(unspec:<VW>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (float_extend:<VW>
+ (match_operand:VWF 3 "register_operand" "vr,vr,vr,vr"))
+ (match_operand:<VW> 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 4 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 5 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfwcvt.f.f.v\t%0,%3,%1.t
+ vfwcvt.f.f.v\t%0,%3,%1.t
+ vfwcvt.f.f.v\t%0,%3
+ vfwcvt.f.f.v\t%0,%3"
+ [(set_attr "type" "vfwcvt")
+ (set_attr "mode" "<MODE>")])
+
+;; Convert double-width float to unsigned integer.
+;; Convert double-width float to signed integer.
+;; Double-width float -> narrow integer (narrowing conversion).
+(define_insn "@vfncvt<mode>_x<fu>_f_w"
+ [(set (match_operand:VWINOQI 0 "register_operand" "=&vr,&vr,&vr,&vr")
+ (unspec:VWINOQI
+ [(unspec:VWINOQI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (unspec:VWINOQI
+ [(match_operand:<VWFMAP> 3 "register_operand" "vr,vr,vr,vr")] FCVT)
+ (match_operand:VWINOQI 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 4 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 5 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfncvt.x<fu>.f.w\t%0,%3,%1.t
+ vfncvt.x<fu>.f.w\t%0,%3,%1.t
+ vfncvt.x<fu>.f.w\t%0,%3
+ vfncvt.x<fu>.f.w\t%0,%3"
+ [(set_attr "type" "vfncvt")
+ (set_attr "mode" "<MODE>")])
+
+;; Convert double-width float to unsigned integer, truncating.
+;; Convert double-width float to signed integer, truncating.
+;; Double-width float -> narrow integer, truncating (round towards zero).
+(define_insn "@vfncvt<mode>_rtz_x<u>_f_w"
+ [(set (match_operand:VWINOQI 0 "register_operand" "=&vr,&vr,&vr,&vr")
+ (unspec:VWINOQI
+ [(unspec:VWINOQI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (any_fix:VWINOQI
+ (match_operand:<VWFMAP> 3 "register_operand" "vr,vr,vr,vr"))
+ (match_operand:VWINOQI 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 4 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 5 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfncvt.rtz.x<u>.f.w\t%0,%3,%1.t
+ vfncvt.rtz.x<u>.f.w\t%0,%3,%1.t
+ vfncvt.rtz.x<u>.f.w\t%0,%3
+ vfncvt.rtz.x<u>.f.w\t%0,%3"
+ [(set_attr "type" "vfncvt")
+ (set_attr "mode" "<MODE>")])
+
+;; Convert double-width unsigned integer to float.
+;; Convert double-width signed integer to float.
+;; Double-width integer -> narrow float.
+(define_insn "@vfncvt<mode>_f_x<u>_w"
+ [(set (match_operand:VWF 0 "register_operand" "=&vr,&vr,&vr,&vr")
+ (unspec:VWF
+ [(unspec:VWF
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (any_float:VWF
+ (match_operand:<VWMAP> 3 "register_operand" "vr,vr,vr,vr"))
+ (match_operand:VWF 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 4 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 5 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfncvt.f.x<u>.w\t%0,%3,%1.t
+ vfncvt.f.x<u>.w\t%0,%3,%1.t
+ vfncvt.f.x<u>.w\t%0,%3
+ vfncvt.f.x<u>.w\t%0,%3"
+ [(set_attr "type" "vfncvt")
+ (set_attr "mode" "<MODE>")])
+
+;; Convert double-width float to single-width float.
+;; Double-width float -> narrow float, rounding per dynamic rounding mode.
+(define_insn "@vfncvt<mode>_f_f_w"
+ [(set (match_operand:VWF 0 "register_operand" "=&vr,&vr,&vr,&vr")
+ (unspec:VWF
+ [(unspec:VWF
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (float_truncate:VWF
+ (match_operand:<VW> 3 "register_operand" "vr,vr,vr,vr"))
+ (match_operand:VWF 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 4 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 5 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfncvt.f.f.w\t%0,%3,%1.t
+ vfncvt.f.f.w\t%0,%3,%1.t
+ vfncvt.f.f.w\t%0,%3
+ vfncvt.f.f.w\t%0,%3"
+ [(set_attr "type" "vfncvt")
+ (set_attr "mode" "<MODE>")])
+
+;; Convert double-width float to single-width float, rounding towards odd.
+;; Double-width float -> narrow float with round-to-odd (UNSPEC_ROD),
+;; used to avoid double rounding in two-step narrowing.
+(define_insn "@vfncvt<mode>_rod_f_f_w"
+ [(set (match_operand:VWF 0 "register_operand" "=&vr,&vr,&vr,&vr")
+ (unspec:VWF
+ [(unspec:VWF
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (unspec:VWF
+ [(float_extend:VWF
+ (match_operand:<VW> 3 "register_operand" "vr,vr,vr,vr"))] UNSPEC_ROD)
+ (match_operand:VWF 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 4 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 5 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfncvt.rod.f.f.w\t%0,%3,%1.t
+ vfncvt.rod.f.f.w\t%0,%3,%1.t
+ vfncvt.rod.f.f.w\t%0,%3
+ vfncvt.rod.f.f.w\t%0,%3"
+ [(set_attr "type" "vfncvt")
+ (set_attr "mode" "<MODE>")])
+
+;; -------------------------------------------------------------------------------
+;; ---- 14. Vector Reduction Operations
+;; -------------------------------------------------------------------------------
+;; Includes:
+;; - 14.1 Vector Single-Width Integer Reduction Instructions
+;; - 14.2 Vector Widening Integer Reduction Instructions
+;; - 14.3 Vector Single-Width Floating-Point Reduction
+;; - 14.4 Vector Widening Floating-Point Reduction Instructions
+;; -------------------------------------------------------------------------------
+
+;; Integer simple-reductions.
+;; Integer reduction: vd[0] = reduce(vs2[*], vs1[0]).  op2 = merge
+;; (result mode is LMUL1), op3 = source vector, op4 = scalar operand in
+;; element 0, op5 = AVL, op6 = policy.
+(define_insn "@vred<reduc><mode>_vs"
+ [(set (match_operand:<VLMUL1> 0 "register_operand" "=vr,vr,vr,vr")
+ (unspec:<VLMUL1>
+ [(unspec:<VM>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (unspec:<VLMUL1>
+ [(match_operand:<VLMUL1> 2 "vector_reg_or_const0_operand" "0,J,0,J")
+ (match_operand:VI 3 "register_operand" "vr,vr,vr,vr")
+ (match_operand:<VLMUL1> 4 "register_operand" "vr,vr,vr,vr")] REDUC)
+ (match_dup 2)] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vred<reduc>.vs\t%0,%3,%4,%1.t
+ vred<reduc>.vs\t%0,%3,%4,%1.t
+ vred<reduc>.vs\t%0,%3,%4
+ vred<reduc>.vs\t%0,%3,%4"
+ [(set_attr "type" "vreduc")
+ (set_attr "mode" "<MODE>")])
+
+;; Signed/Unsigned sum reduction into double-width accumulator.
+;; Widening sum reduction: elements sign/zero-extended to the 2*SEW
+;; LMUL1 accumulator mode before summing.  Earlyclobber dest.
+(define_insn "@vwredsum<u><mode>_vs"
+ [(set (match_operand:<VWLMUL1> 0 "register_operand" "=&vr,&vr,&vr,&vr")
+ (unspec:<VWLMUL1>
+ [(unspec:<VM>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (unspec:<VWLMUL1>
+ [(match_operand:<VWLMUL1> 2 "vector_reg_or_const0_operand" "0,J,0,J")
+ (any_extend:<VWLMUL1>
+ (match_operand:VWREDI 3 "register_operand" "vr,vr,vr,vr"))
+ (match_operand:<VWLMUL1> 4 "register_operand" "vr,vr,vr,vr")] UNSPEC_REDUC_SUM)
+ (match_dup 2)] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vwredsum<u>.vs\t%0,%3,%4,%1.t
+ vwredsum<u>.vs\t%0,%3,%4,%1.t
+ vwredsum<u>.vs\t%0,%3,%4
+ vwredsum<u>.vs\t%0,%3,%4"
+ [(set_attr "type" "vwreduc")
+ (set_attr "mode" "<MODE>")])
+
+;; Floating-Point simple-reductions.
+;; FP reduction (FREDUC iterator: sum/min/max variants); same operand
+;; layout as the integer vred pattern above.
+(define_insn "@vfred<reduc><mode>_vs"
+ [(set (match_operand:<VLMUL1> 0 "register_operand" "=vr,vr,vr,vr")
+ (unspec:<VLMUL1>
+ [(unspec:<VM>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (unspec:<VLMUL1>
+ [(match_operand:<VLMUL1> 2 "vector_reg_or_const0_operand" "0,J,0,J")
+ (match_operand:VF 3 "register_operand" "vr,vr,vr,vr")
+ (match_operand:<VLMUL1> 4 "register_operand" "vr,vr,vr,vr")] FREDUC)
+ (match_dup 2)] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfred<reduc>.vs\t%0,%3,%4,%1.t
+ vfred<reduc>.vs\t%0,%3,%4,%1.t
+ vfred<reduc>.vs\t%0,%3,%4
+ vfred<reduc>.vs\t%0,%3,%4"
+ [(set_attr "type" "vreduc")
+ (set_attr "mode" "<MODE>")])
+
+;; unordered sum reduction into double-width accumulator.
+;; Widening FP sum reduction, unordered (implementation-defined
+;; association order); elements float-extended to the wide accumulator.
+(define_insn "@vfwredusum<mode>_vs"
+ [(set (match_operand:<VWLMUL1> 0 "register_operand" "=&vr,&vr,&vr,&vr")
+ (unspec:<VWLMUL1>
+ [(unspec:<VM>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (unspec:<VWLMUL1>
+ [(match_operand:<VWLMUL1> 2 "vector_reg_or_const0_operand" "0,J,0,J")
+ (float_extend:<VWLMUL1>
+ (match_operand:VWREDF 3 "register_operand" "vr,vr,vr,vr"))
+ (match_operand:<VWLMUL1> 4 "register_operand" "vr,vr,vr,vr")] UNSPEC_REDUC_UNORDERED_SUM)
+ (match_dup 2)] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfwredusum.vs\t%0,%3,%4,%1.t
+ vfwredusum.vs\t%0,%3,%4,%1.t
+ vfwredusum.vs\t%0,%3,%4
+ vfwredusum.vs\t%0,%3,%4"
+ [(set_attr "type" "vwreduc")
+ (set_attr "mode" "<MODE>")])
+
+;; ordered sum reduction into double-width accumulator.
+;; Widening FP sum reduction, ordered (strict element-order association,
+;; needed for strict FP semantics).
+(define_insn "@vfwredosum<mode>_vs"
+ [(set (match_operand:<VWLMUL1> 0 "register_operand" "=&vr,&vr,&vr,&vr")
+ (unspec:<VWLMUL1>
+ [(unspec:<VM>
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (unspec:<VWLMUL1>
+ [(match_operand:<VWLMUL1> 2 "vector_reg_or_const0_operand" "0,J,0,J")
+ (float_extend:<VWLMUL1>
+ (match_operand:VWREDF 3 "register_operand" "vr,vr,vr,vr"))
+ (match_operand:<VWLMUL1> 4 "register_operand" "vr,vr,vr,vr")] UNSPEC_REDUC_ORDERED_SUM)
+ (match_dup 2)] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfwredosum.vs\t%0,%3,%4,%1.t
+ vfwredosum.vs\t%0,%3,%4,%1.t
+ vfwredosum.vs\t%0,%3,%4
+ vfwredosum.vs\t%0,%3,%4"
+ [(set_attr "type" "vwreduc")
+ (set_attr "mode" "<MODE>")])
+
+;; -------------------------------------------------------------------------------
+;; ---- 15. Vector Mask Instructions
+;; -------------------------------------------------------------------------------
+;; Includes:
+;; - 15.1 Vector Mask-Register Logical Instructions
+;; - 15.2 Vector mask population count vpopc
+;; - 15.3 vfirst find-first-set mask bit
+;; - 15.4 vmsbf.m set-before-first mask bit
+;; - 15.5 vmsif.m set-including-first mask bit
+;; - 15.6 vmsof.m set-only-first mask bit
+;; - 15.8 Vector Iota Instruction
+;; - 15.9 Vector Element Index Instructions
+;; -------------------------------------------------------------------------------
+
+;; Vector Mask-Register Logical Instructions.
+;; Mask-register logical ops (and/or/xor via any_bitwise).  Mask ops are
+;; never themselves masked; op3 = AVL, op4 = policy.
+(define_insn "@vm<optab><mode>_mm"
+ [(set (match_operand:VB 0 "register_operand" "=vr")
+ (unspec:VB
+ [(any_bitwise:VB
+ (match_operand:VB 1 "register_operand" "vr")
+ (match_operand:VB 2 "register_operand" "vr"))
+ (match_operand 3 "p_reg_or_const_csr_operand" "rK")
+ (match_operand 4 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "vm<insn>.mm\t%0,%1,%2"
+ [(set_attr "type" "vmask")
+ (set_attr "mode" "<MODE>")])
+
+;; Negated mask logical ops: vmnand/vmnor/vmxnor = NOT (op1 <bitwise> op2).
+(define_insn "@vmn<optab><mode>_mm"
+ [(set (match_operand:VB 0 "register_operand" "=vr")
+ (unspec:VB
+ [(not:VB
+ (any_bitwise:VB
+ (match_operand:VB 1 "register_operand" "vr")
+ (match_operand:VB 2 "register_operand" "vr")))
+ (match_operand 3 "p_reg_or_const_csr_operand" "rK")
+ (match_operand 4 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "vm<ninsn>.mm\t%0,%1,%2"
+ [(set_attr "type" "vmask")
+ (set_attr "mode" "<MODE>")])
+
+;; "andnot"/"ornot" forms: vm<insn>n.mm vd,vs1,vs2 computes
+;; vs1 <op> (NOT vs2) (any_logicalnot iterator).
+(define_insn "@vm<optab>not<mode>_mm"
+ [(set (match_operand:VB 0 "register_operand" "=vr")
+ (unspec:VB
+ [(any_logicalnot:VB
+ (match_operand:VB 1 "register_operand" "vr")
+ (not:VB
+ (match_operand:VB 2 "register_operand" "vr")))
+ (match_operand 3 "p_reg_or_const_csr_operand" "rK")
+ (match_operand 4 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "vm<insn>n.mm\t%0,%1,%2"
+ [(set_attr "type" "vmask")
+ (set_attr "mode" "<MODE>")])
+
+;; vmmv.m vd,vs -> vmand.mm vd,vs,vs # Copy mask register
+;; Plain copy of a mask register, exposed as a pseudo-instruction.
+(define_insn "@vmmv<mode>_m"
+ [(set (match_operand:VB 0 "register_operand" "=vr")
+ (unspec:VB
+ [(match_operand:VB 1 "register_operand" "vr")
+ (match_operand 2 "p_reg_or_const_csr_operand" "rK")
+ (match_operand 3 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "vmmv.m\t%0,%1"
+ [(set_attr "type" "vmask")
+ (set_attr "mode" "<MODE>")])
+
+;; vmclr.m vd -> vmxor.mm vd,vd,vd # Clear mask register
+;; Modeled as a broadcast of constant 0 into the mask register.
+(define_insn "@vmclr<mode>_m"
+ [(set (match_operand:VB 0 "register_operand" "=vr")
+ (unspec:VB
+ [(vec_duplicate:VB (const_int 0))
+ (match_operand 1 "p_reg_or_const_csr_operand" "rK")
+ (match_operand 2 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "vmclr.m\t%0"
+ [(set_attr "type" "vmask")
+ (set_attr "mode" "<MODE>")])
+
+;; vmset.m vd -> vmxnor.mm vd,vd,vd # Set mask register
+;; Modeled as a broadcast of constant 1 into the mask register.
+(define_insn "@vmset<mode>_m"
+ [(set (match_operand:VB 0 "register_operand" "=vr")
+ (unspec:VB
+ [(vec_duplicate:VB (const_int 1))
+ (match_operand 1 "p_reg_or_const_csr_operand" "rK")
+ (match_operand 2 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "vmset.m\t%0"
+ [(set_attr "type" "vmask")
+ (set_attr "mode" "<MODE>")])
+
+;; vmnot.m vd,vs -> vmnand.mm vd,vs,vs # Invert bits
+;; Bitwise complement of a mask register.
+(define_insn "@vmnot<mode>_m"
+ [(set (match_operand:VB 0 "register_operand" "=vr")
+ (unspec:VB
+ [(not:VB
+ (match_operand:VB 1 "register_operand" "vr"))
+ (match_operand 2 "p_reg_or_const_csr_operand" "rK")
+ (match_operand 3 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "vmnot.m\t%0,%1"
+ [(set_attr "type" "vmask")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector mask population count vpopc
+;; vcpop.m rd,vs2[,v0.t] — count set mask bits into a GPR.  Two
+;; alternatives: masked (operand 1 = vm) and unmasked (operand 1 = J,
+;; i.e. const 0).
+(define_insn "@vcpop<VB:mode>_<X:mode>_m"
+ [(set (match_operand:X 0 "register_operand" "=r,r")
+ (unspec:X
+ [(unspec:VB
+ [(match_operand:VB 1 "vector_reg_or_const0_operand" "vm,J")
+ (match_operand:VB 2 "register_operand" "vr,vr")
+ ] UNSPEC_VCPOP)
+ (match_operand 3 "p_reg_or_const_csr_operand" "rK,rK")
+ (match_operand 4 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vcpop.m\t%0,%2,%1.t
+ vcpop.m\t%0,%2"
+ [(set_attr "type" "vcpop")
+ (set_attr "mode" "<VB:MODE>")])
+
+;; vfirst find-first-set mask bit
+;; vfirst.m rd,vs2[,v0.t] — index of first set mask bit into a GPR;
+;; masked and unmasked alternatives as in vcpop above.
+(define_insn "@vfirst<VB:mode>_<X:mode>_m"
+ [(set (match_operand:X 0 "register_operand" "=r,r")
+ (unspec:X
+ [(unspec:VB
+ [(match_operand:VB 1 "vector_reg_or_const0_operand" "vm,J")
+ (match_operand:VB 2 "register_operand" "vr,vr")] UNSPEC_FIRST)
+ (match_operand 3 "p_reg_or_const_csr_operand" "rK,rK")
+ (match_operand 4 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfirst.m\t%0,%2,%1.t
+ vfirst.m\t%0,%2"
+ [(set_attr "type" "vmsetbit")
+ (set_attr "mode" "<VB:MODE>")])
+
+;; vmsbf.m set-before-first mask bit.
+;; vmsif.m set-including-first mask bit.
+;; vmsof.m set-only-first mask bit.
+;; MASK_SET selects among the three; UNSPEC_SELECT merges with the
+;; maskedoff operand 2 (register or const-0).  The destination is
+;; earlyclobber (&) so it cannot overlap the source mask.
+(define_insn "@vm<smb><mode>_m"
+ [(set (match_operand:VB 0 "register_operand" "=&vr,&vr,&vr")
+ (unspec:VB
+ [(unspec:VB
+ [(match_operand:VB 1 "vector_reg_or_const0_operand" "vm,vm,J")
+ (unspec:VB
+ [(match_operand:VB 3 "register_operand" "vr,vr,vr")] MASK_SET)
+ (match_operand:VB 2 "vector_reg_or_const0_operand" "0,J,J")] UNSPEC_SELECT)
+ (match_operand 4 "p_reg_or_const_csr_operand" "rK,rK,rK")
+ (match_operand 5 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vm<smb>.m\t%0,%3,%1.t
+ vm<smb>.m\t%0,%3,%1.t
+ vm<smb>.m\t%0,%3"
+ [(set_attr "type" "vmsetbit")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector Iota Instruction.
+;; viota.m vd,vs2[,v0.t] — running count of set mask bits, written to an
+;; integer vector; earlyclobber destination, masked/unmasked and
+;; tie/maskedoff alternatives.
+(define_insn "@viota<mode>_m"
+ [(set (match_operand:VI 0 "register_operand" "=&vr,&vr,&vr,&vr")
+ (unspec:VI
+ [(unspec:VI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (unspec:VI
+ [(match_operand:<VM> 3 "register_operand" "vr,vr,vr,vr")] UNSPEC_IOTA)
+ (match_operand:VI 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 4 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 5 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ viota.m\t%0,%3,%1.t
+ viota.m\t%0,%3,%1.t
+ viota.m\t%0,%3
+ viota.m\t%0,%3"
+ [(set_attr "type" "viota")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector Element Index Instructions.
+;; vid.v vd[,v0.t] — write each element's index.  UNSPEC_ID carries the
+;; vl/policy operands since the instruction has no vector source.
+(define_insn "@vid<mode>_v"
+ [(set (match_operand:VI 0 "register_operand" "=vd,vd,vr,vr")
+ (unspec:VI
+ [(unspec:VI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (unspec:VI
+ [(match_operand 3 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 4 "const_int_operand")] UNSPEC_ID)
+ (match_operand:VI 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vid.v\t%0,%1.t
+ vid.v\t%0,%1.t
+ vid.v\t%0
+ vid.v\t%0"
+ [(set_attr "type" "vid")
+ (set_attr "mode" "<MODE>")])
+
+;; -------------------------------------------------------------------------------
+;; ---- 16. Vector Permutation Instructions
+;; -------------------------------------------------------------------------------
+;; Includes:
+;; - 16.1 Integer Scalar Move Instructions
+;; - 16.2 Floating-Point Scalar Move Instructions
+;; - 16.3 Vector Slide Instructions
+;; - 16.4 Vector Register Gather Instructions
+;; - 16.5 Vector Compress Instructions
+;; -------------------------------------------------------------------------------
+
+;; Integer Scalar Move Instructions.
+;; vmv.x.s rd,vs2 — copy element 0 to a GPR, for element widths that fit
+;; in XLEN (VNOT64BITI); 64-bit elements are handled by the expander below.
+(define_insn "@vmv<mode>_x_s"
+ [(set (match_operand:<VSUB> 0 "register_operand" "=r")
+ (unspec:<VSUB>
+ [(vec_select:<VSUB>
+ (match_operand:VNOT64BITI 1 "register_operand" "vr")
+ (parallel [(const_int 0)]))
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "vmv.x.s\t%0,%1"
+ [(set_attr "type" "vmv_x_s")
+ (set_attr "mode" "<MODE>")])
+
+;; vmv.x.s for 64-bit elements.  On RV64 this is a single vmv.x.s; on
+;; RV32 the 64-bit element is split: read the low 32 bits directly,
+;; shift the vector right by 32 with vsrl, then read the high 32 bits
+;; from the shifted copy.
+(define_expand "@vmv<mode>_x_s"
+ [(set (match_operand:<VSUB> 0 "register_operand")
+ (unspec:<VSUB>
+ [(vec_select:<VSUB>
+ (match_operand:V64BITI 1 "register_operand")
+ (parallel [(const_int 0)]))
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ {
+ if (!TARGET_64BIT)
+ {
+ rtx vector = gen_reg_rtx (<MODE>mode);
+ /* force_reg allocates its own pseudo; no separate gen_reg_rtx
+ is needed for the shift amount.  */
+ rtx shift = force_reg (Pmode, GEN_INT (32));
+
+ rtx lo = gen_lowpart (Pmode, operands[0]);
+ rtx hi = gen_highpart (Pmode, operands[0]);
+ emit_insn (gen_vlshr<mode>_vx (vector,
+ const0_rtx, const0_rtx, operands[1],
+ shift, GEN_INT (1), rvv_gen_policy ()));
+ emit_insn (gen_vmv<mode>_x_s_lo (lo, operands[1]));
+ emit_insn (gen_vmv<mode>_x_s_hi (hi, vector));
+ DONE;
+ }
+
+ emit_insn (gen_vmv<mode>_x_s_di_internal (operands[0], operands[1]));
+ DONE;
+ })
+
+;; RV64 form of vmv.x.s for 64-bit elements (whole element fits in XLEN).
+(define_insn "vmv<mode>_x_s_di_internal"
+ [(set (match_operand:<VSUB> 0 "register_operand" "=r")
+ (unspec:<VSUB>
+ [(vec_select:<VSUB>
+ (match_operand:V64BITI 1 "register_operand" "vr")
+ (parallel [(const_int 0)]))
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "vmv.x.s\t%0,%1"
+ [(set_attr "type" "vmv_x_s")
+ (set_attr "mode" "<MODE>")])
+
+;; RV32 half-reads of a 64-bit element 0: UNSPEC_LO tags the low 32 bits.
+(define_insn "vmv<mode>_x_s_lo"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI
+ [(vec_select:DI
+ (match_operand:V64BITI 1 "register_operand" "vr")
+ (parallel [(const_int 0)]))
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_LO))]
+ "TARGET_VECTOR"
+ "vmv.x.s\t%0,%1"
+ [(set_attr "type" "vmv_x_s")
+ (set_attr "mode" "<MODE>")])
+
+;; High 32 bits of a 64-bit element 0 (operand 1 is the pre-shifted copy).
+;; NOTE(review): unlike vmv<mode>_x_s_lo this unspec omits
+;; (reg:SI VL_REGNUM) — confirm the asymmetry is intentional.
+(define_insn "vmv<mode>_x_s_hi"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI
+ [(vec_select:DI
+ (match_operand:V64BITI 1 "register_operand" "vr")
+ (parallel [(const_int 0)]))
+ (reg:SI VTYPE_REGNUM)] UNSPEC_HI))]
+ "TARGET_VECTOR"
+ "vmv.x.s\t%0,%1"
+ [(set_attr "type" "vmv_x_s")
+ (set_attr "mode" "<MODE>")])
+
+;; vmv.s.x vd,rs1 — write a scalar GPR (or zero, constraint J) into
+;; element 0, leaving other elements per the tie/maskedoff operand 1.
+(define_insn "@vmv<mode>_s_x_internal"
+ [(set (match_operand:VI 0 "register_operand" "=vr,vr,vr,vr")
+ (unspec:VI
+ [(unspec:VI
+ [(vec_duplicate:VI
+ (match_operand:<VSUB> 2 "reg_or_0_operand" "r,J,r,J"))
+ (match_operand:VI 1 "vector_reg_or_const0_operand" "0,0,J,J")
+ (const_int 1)] UNSPEC_VMV_SX)
+ (match_operand 3 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 4 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vmv.s.x\t%0,%2
+ vmv.s.x\t%0,zero
+ vmv.s.x\t%0,%2
+ vmv.s.x\t%0,zero"
+ [(set_attr "type" "vmv_s_x")
+ (set_attr "mode" "<MODE>")])
+
+;; RV32 vmv.s.x into a 64-bit-element vector: the SImode scalar is
+;; sign-extended to the element width, matching the instruction's
+;; sign-extension of rs1.
+(define_insn "@vmv<mode>_s_x_32bit"
+ [(set (match_operand:V64BITI 0 "register_operand" "=vr,vr,vr,vr")
+ (unspec:V64BITI
+ [(unspec:V64BITI
+ [(vec_duplicate:V64BITI
+ (sign_extend:<VSUB>
+ (match_operand:SI 2 "reg_or_0_operand" "r,J,r,J")))
+ (match_operand:V64BITI 1 "vector_reg_or_const0_operand" "0,0,J,J")
+ (const_int 1)] UNSPEC_VMV_SX)
+ (match_operand:SI 3 "csr_operand" "rK,rK,rK,rK")
+ (match_operand:SI 4 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vmv.s.x\t%0,%2
+ vmv.s.x\t%0,zero
+ vmv.s.x\t%0,%2
+ vmv.s.x\t%0,zero"
+ [(set_attr "type" "vmv_s_x")
+ (set_attr "mode" "<MODE>")])
+
+;; This pattern is used by auto-vectorization to
+;; initialize a vector whose value of element 0 is
+;; zero. We don't want to use subreg to generate a
+;; transformation between floating-point and integer,
+;; so element 0 is zeroed with vmv.s.x vd,zero directly.
+(define_insn "@vmv<mode>_s_x_internal"
+ [(set (match_operand:VF 0 "register_operand" "=vr")
+ (unspec:VF
+ [(unspec:VF
+ [(const_int 0)
+ (const_int 1)] UNSPEC_VMV_SX)
+ (match_operand 1 "p_reg_or_const_csr_operand" "rK")
+ (match_operand 2 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "vmv.s.x\t%0,zero"
+ [(set_attr "type" "vmv_s_x")
+ (set_attr "mode" "<MODE>")])
+
+;; Floating-Point Scalar Move Instructions.
+;; vfmv.f.s rd,vs2 — copy element 0 to an FP register.
+(define_insn "@vfmv<mode>_f_s"
+ [(set (match_operand:<VSUB> 0 "register_operand" "=f")
+ (unspec:<VSUB>
+ [(vec_select:<VSUB>
+ (match_operand:VF 1 "register_operand" "vr")
+ (parallel [(const_int 0)]))
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "vfmv.f.s\t%0,%1"
+ [(set_attr "type" "vfmv_f_s")
+ (set_attr "mode" "<MODE>")])
+
+;; vfmv.s.f vd,rs1 — write an FP scalar into element 0; operand 1 is the
+;; tie/maskedoff vector (register or const-0).
+(define_insn "@vfmv<mode>_s_f"
+ [(set (match_operand:VF 0 "register_operand" "=vr,vr")
+ (unspec:VF
+ [(unspec:VF
+ [(vec_duplicate:VF
+ (match_operand:<VSUB> 2 "register_operand" "f,f"))
+ (match_operand:VF 1 "vector_reg_or_const0_operand" "0,J")
+ (const_int 1)] UNSPEC_VMV_SX)
+ (match_operand 3 "p_reg_or_const_csr_operand" "rK,rK")
+ (match_operand 4 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "vfmv.s.f\t%0,%2"
+ [(set_attr "type" "vfmv_s_f")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector Slideup/Slidedown Instructions.
+;; vslideup.vx/.vi — SLIDE_UP variant with earlyclobber destination
+;; (slideup reads old destination elements, so dest must not overlap
+;; sources).  Offset operand 4 is a GPR (.vx) or uimm5 (.vi).
+(define_insn "@vslide<ud><mode>_vx"
+ [(set (match_operand:V 0 "register_operand" "=&vr,&vr,&vr,&vr,&vr,&vr,&vr,&vr")
+ (unspec:V
+ [(unspec:V
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+ (unspec:V
+ [(match_operand:V 2 "vector_reg_or_const0_operand" "0,0,J,J,0,0,J,J")
+ (match_operand:V 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr")
+ (match_operand 4 "p_reg_or_uimm5_operand" "r,K,r,K,r,K,r,K")] SLIDE_UP)
+ (match_dup 2)] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK,rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vslide<ud>.vx\t%0,%3,%4,%1.t
+ vslide<ud>.vi\t%0,%3,%4,%1.t
+ vslide<ud>.vx\t%0,%3,%4,%1.t
+ vslide<ud>.vi\t%0,%3,%4,%1.t
+ vslide<ud>.vx\t%0,%3,%4
+ vslide<ud>.vi\t%0,%3,%4
+ vslide<ud>.vx\t%0,%3,%4
+ vslide<ud>.vi\t%0,%3,%4"
+ [(set_attr "type" "vslide")
+ (set_attr "mode" "<MODE>")])
+
+;; SLIDE_DOWN variant: no earlyclobber needed, destination may be a
+;; masked (vd) or unmasked (vr) register depending on operand 1.
+(define_insn "@vslide<ud><mode>_vx"
+ [(set (match_operand:V 0 "register_operand" "=vd,vd,vd,vd,vr,vr,vr,vr")
+ (unspec:V
+ [(unspec:V
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+ (unspec:V
+ [(match_operand:V 2 "vector_reg_or_const0_operand" "0,0,J,J,0,0,J,J")
+ (match_operand:V 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr")
+ (match_operand 4 "p_reg_or_uimm5_operand" "r,K,r,K,r,K,r,K")] SLIDE_DOWN)
+ (match_dup 2)] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK,rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vslide<ud>.vx\t%0,%3,%4,%1.t
+ vslide<ud>.vi\t%0,%3,%4,%1.t
+ vslide<ud>.vx\t%0,%3,%4,%1.t
+ vslide<ud>.vi\t%0,%3,%4,%1.t
+ vslide<ud>.vx\t%0,%3,%4
+ vslide<ud>.vi\t%0,%3,%4
+ vslide<ud>.vx\t%0,%3,%4
+ vslide<ud>.vi\t%0,%3,%4"
+ [(set_attr "type" "vslide")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector Integer Slide1up/Slide1down Instructions.
+;; SLIDE1_UP variant (earlyclobber destination); scalar operand 4 is a
+;; GPR or zero (constraint J -> "zero" in the output template).
+(define_insn "@vslide1<ud><mode>_vx_internal"
+ [(set (match_operand:VI 0 "register_operand" "=&vr,&vr,&vr,&vr,&vr,&vr,&vr,&vr")
+ (unspec:VI
+ [(unspec:VI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+ (unspec:VI
+ [(match_operand:VI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr")
+ (match_operand:<VSUB> 4 "reg_or_0_operand" "r,J,r,J,r,J,r,J")] SLIDE1_UP)
+ (match_operand:VI 2 "vector_reg_or_const0_operand" "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK,rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vslide1<ud>.vx\t%0,%3,%4,%1.t
+ vslide1<ud>.vx\t%0,%3,zero,%1.t
+ vslide1<ud>.vx\t%0,%3,%4,%1.t
+ vslide1<ud>.vx\t%0,%3,zero,%1.t
+ vslide1<ud>.vx\t%0,%3,%4
+ vslide1<ud>.vx\t%0,%3,zero
+ vslide1<ud>.vx\t%0,%3,%4
+ vslide1<ud>.vx\t%0,%3,zero"
+ [(set_attr "type" "vslide")
+ (set_attr "mode" "<MODE>")])
+
+;; SLIDE1_DOWN variant: no earlyclobber, vd/vr destination alternatives.
+(define_insn "@vslide1<ud><mode>_vx_internal"
+ [(set (match_operand:VI 0 "register_operand" "=vd,vd,vd,vd,vr,vr,vr,vr")
+ (unspec:VI
+ [(unspec:VI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+ (unspec:VI
+ [(match_operand:VI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr")
+ (match_operand:<VSUB> 4 "reg_or_0_operand" "r,J,r,J,r,J,r,J")] SLIDE1_DOWN)
+ (match_operand:VI 2 "vector_reg_or_const0_operand" "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK,rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vslide1<ud>.vx\t%0,%3,%4,%1.t
+ vslide1<ud>.vx\t%0,%3,zero,%1.t
+ vslide1<ud>.vx\t%0,%3,%4,%1.t
+ vslide1<ud>.vx\t%0,%3,zero,%1.t
+ vslide1<ud>.vx\t%0,%3,%4
+ vslide1<ud>.vx\t%0,%3,zero
+ vslide1<ud>.vx\t%0,%3,%4
+ vslide1<ud>.vx\t%0,%3,zero"
+ [(set_attr "type" "vslide")
+ (set_attr "mode" "<MODE>")])
+
+;; RV32 slide1up for 64-bit-element vectors: the SImode scalar is
+;; sign-extended to the element width; earlyclobber destination.
+(define_insn "@vslide1<ud><mode>_vx_32bit"
+ [(set (match_operand:V64BITI 0 "register_operand" "=&vr,&vr,&vr,&vr,&vr,&vr,&vr,&vr")
+ (unspec:V64BITI
+ [(unspec:V64BITI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+ (unspec:V64BITI
+ [(match_operand:V64BITI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr")
+ (sign_extend:<VSUB> (match_operand:SI 4 "reg_or_0_operand" "r,J,r,J,r,J,r,J"))] SLIDE1_UP)
+ (match_operand:V64BITI 2 "vector_reg_or_const0_operand" "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+ (match_operand:SI 5 "csr_operand" "rK,rK,rK,rK,rK,rK,rK,rK")
+ (match_operand:SI 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vslide1<ud>.vx\t%0,%3,%4,%1.t
+ vslide1<ud>.vx\t%0,%3,zero,%1.t
+ vslide1<ud>.vx\t%0,%3,%4,%1.t
+ vslide1<ud>.vx\t%0,%3,zero,%1.t
+ vslide1<ud>.vx\t%0,%3,%4
+ vslide1<ud>.vx\t%0,%3,zero
+ vslide1<ud>.vx\t%0,%3,%4
+ vslide1<ud>.vx\t%0,%3,zero"
+ [(set_attr "type" "vslide")
+ (set_attr "mode" "<MODE>")])
+
+;; RV32 slide1down for 64-bit-element vectors (SLIDE1_DOWN; no
+;; earlyclobber, vd/vr destination alternatives).
+(define_insn "@vslide1<ud><mode>_vx_32bit"
+ [(set (match_operand:V64BITI 0 "register_operand" "=vd,vd,vd,vd,vr,vr,vr,vr")
+ (unspec:V64BITI
+ [(unspec:V64BITI
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+ (unspec:V64BITI
+ [(match_operand:V64BITI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr")
+ (sign_extend:<VSUB> (match_operand:SI 4 "reg_or_0_operand" "r,J,r,J,r,J,r,J"))] SLIDE1_DOWN)
+ (match_operand:V64BITI 2 "vector_reg_or_const0_operand" "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+ (match_operand:SI 5 "csr_operand" "rK,rK,rK,rK,rK,rK,rK,rK")
+ (match_operand:SI 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vslide1<ud>.vx\t%0,%3,%4,%1.t
+ vslide1<ud>.vx\t%0,%3,zero,%1.t
+ vslide1<ud>.vx\t%0,%3,%4,%1.t
+ vslide1<ud>.vx\t%0,%3,zero,%1.t
+ vslide1<ud>.vx\t%0,%3,%4
+ vslide1<ud>.vx\t%0,%3,zero
+ vslide1<ud>.vx\t%0,%3,%4
+ vslide1<ud>.vx\t%0,%3,zero"
+ [(set_attr "type" "vslide")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector Floating-Point Slide1up/Slide1down Instructions.
+;; SLIDE1_DOWN variant: no earlyclobber (vd/vr destination).
+(define_insn "@vfslide1<ud><mode>_vf"
+ [(set (match_operand:VF 0 "register_operand" "=vd,vd,vr,vr")
+ (unspec:VF
+ [(unspec:VF
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (unspec:VF
+ [(match_operand:VF 3 "register_operand" "vr,vr,vr,vr")
+ (match_operand:<VSUB> 4 "register_operand" "f,f,f,f")] SLIDE1_DOWN)
+ (match_operand:VF 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfslide1<ud>.vf\t%0,%3,%4,%1.t
+ vfslide1<ud>.vf\t%0,%3,%4,%1.t
+ vfslide1<ud>.vf\t%0,%3,%4
+ vfslide1<ud>.vf\t%0,%3,%4"
+ [(set_attr "type" "vslide")
+ (set_attr "mode" "<MODE>")])
+
+;; SLIDE1_UP variant: earlyclobber destination (slideup reads old
+;; destination elements).
+(define_insn "@vfslide1<ud><mode>_vf"
+ [(set (match_operand:VF 0 "register_operand" "=&vr,&vr,&vr,&vr")
+ (unspec:VF
+ [(unspec:VF
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (unspec:VF
+ [(match_operand:VF 3 "register_operand" "vr,vr,vr,vr")
+ (match_operand:<VSUB> 4 "register_operand" "f,f,f,f")] SLIDE1_UP)
+ (match_operand:VF 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vfslide1<ud>.vf\t%0,%3,%4,%1.t
+ vfslide1<ud>.vf\t%0,%3,%4,%1.t
+ vfslide1<ud>.vf\t%0,%3,%4
+ vfslide1<ud>.vf\t%0,%3,%4"
+ [(set_attr "type" "vslide")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector vrgather instruction.
+;; vrgather.vv vd,vs2,vs1 — gather elements of %3 using indices in %4;
+;; earlyclobber destination (dest must not overlap sources).
+(define_insn "@vrgather<mode>_vv"
+ [(set (match_operand:V 0 "register_operand" "=&vr,&vr,&vr,&vr")
+ (unspec:V
+ [(unspec:V
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (unspec:V
+ [(match_operand:V 3 "register_operand" "vr,vr,vr,vr")
+ (match_operand:<VMAP> 4 "register_operand" "vr,vr,vr,vr")] UNSPEC_RGATHER)
+ (match_operand:V 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vrgather.vv\t%0,%3,%4,%1.t
+ vrgather.vv\t%0,%3,%4,%1.t
+ vrgather.vv\t%0,%3,%4
+ vrgather.vv\t%0,%3,%4"
+ [(set_attr "type" "vgather")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector vrgatherei16 instruction.
+;; vrgatherei16.vv — like vrgather.vv but the index vector always has
+;; 16-bit elements (<VMAPI16> mode attribute); earlyclobber destination.
+(define_insn "@vrgatherei16<V16:mode>_vv"
+ [(set (match_operand:V16 0 "register_operand" "=&vr,&vr,&vr,&vr")
+ (unspec:V16
+ [(unspec:V16
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+ (unspec:V16
+ [(match_operand:V16 3 "register_operand" "vr,vr,vr,vr")
+ (match_operand:<VMAPI16> 4 "register_operand" "vr,vr,vr,vr")] UNSPEC_RGATHEREI16)
+ (match_operand:V16 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vrgatherei16.vv\t%0,%3,%4,%1.t
+ vrgatherei16.vv\t%0,%3,%4,%1.t
+ vrgatherei16.vv\t%0,%3,%4
+ vrgatherei16.vv\t%0,%3,%4"
+ [(set_attr "type" "vgather")
+ (set_attr "mode" "<V16:MODE>")])
+
+;; Vector-Scalar vrgather instruction.
+;; vrgather.vx/.vi — gather with a single scalar index (GPR or uimm5);
+;; earlyclobber destination.
+(define_insn "@vrgather<mode>_vx"
+ [(set (match_operand:V 0 "register_operand" "=&vr,&vr,&vr,&vr,&vr,&vr,&vr,&vr")
+ (unspec:V
+ [(unspec:V
+ [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+ (unspec:V
+ [(match_operand:V 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr")
+ (match_operand 4 "p_reg_or_uimm5_operand" "r,K,r,K,r,K,r,K")] UNSPEC_RGATHER)
+ (match_operand:V 2 "vector_reg_or_const0_operand" "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+ (match_operand 5 "p_reg_or_const_csr_operand" "rK,rK,rK,rK,rK,rK,rK,rK")
+ (match_operand 6 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+ vrgather.vx\t%0,%3,%4,%1.t
+ vrgather.vi\t%0,%3,%4,%1.t
+ vrgather.vx\t%0,%3,%4,%1.t
+ vrgather.vi\t%0,%3,%4,%1.t
+ vrgather.vx\t%0,%3,%4
+ vrgather.vi\t%0,%3,%4
+ vrgather.vx\t%0,%3,%4
+ vrgather.vi\t%0,%3,%4"
+ [(set_attr "type" "vgather")
+ (set_attr "mode" "<MODE>")])
+
+;; Vector Compress Instruction.
+;; vcompress.vm vd,vs2,vs1 — pack the elements of %3 selected by mask %1
+;; into the destination; operand 2 is the maskedoff/tie source.
+;; Earlyclobber: vd must not overlap vs1/vs2.
+(define_insn "@vcompress<mode>_vm"
+ [(set (match_operand:V 0 "register_operand" "=&vr,&vr")
+ (unspec:V
+ [(unspec:V
+ [(match_operand:<VM> 1 "register_operand" "vm,vm")
+ (match_operand:V 2 "vector_reg_or_const0_operand" "0,J")
+ (match_operand:V 3 "register_operand" "vr,vr")] UNSPEC_COMPRESS)
+ (match_operand 4 "p_reg_or_const_csr_operand" "rK,rK")
+ (match_operand 5 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "vcompress.vm\t%0,%3,%1"
+ [(set_attr "type" "vcompress")
+ (set_attr "mode" "<MODE>")])
\ No newline at end of file