@@ -555,7 +555,7 @@ riscv*)
extra_objs="${extra_objs} riscv-vector-builtins.o riscv-vector-builtins-shapes.o riscv-vector-builtins-bases.o sifive-vector-builtins-bases.o"
extra_objs="${extra_objs} thead.o riscv-target-attr.o"
d_target_objs="riscv-d.o"
- extra_headers="riscv_vector.h riscv_crypto.h riscv_bitmanip.h riscv_th_vector.h riscv_cmo.h"
+ extra_headers="riscv_vector.h riscv_crypto.h riscv_bitmanip.h riscv_th_vector.h riscv_cmo.h sifive_vector.h"
target_gtfiles="$target_gtfiles \$(srcdir)/config/riscv/riscv-vector-builtins.cc"
target_gtfiles="$target_gtfiles \$(srcdir)/config/riscv/riscv-vector-builtins.h"
;;
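
Note: adding sifive_vector.h to extra_headers is what lets user code reach the new intrinsics with a plain include. A minimal usage sketch follows; the compile flags, the exact prototype, and the field names in the comment are illustrative assumptions based on the shapes and argument lists added below, not a verbatim quote of the SiFive documentation.

#include <stddef.h>
#include <stdint.h>
#include <sifive_vector.h>

/* Build with something like:
   riscv64-unknown-elf-gcc -march=rv64gcv_xsfvcp -mabi=lp64d -O2 -S t.c  */
void
issue_custom_insn (uint8_t xs1, size_t vl)
{
  /* sf.vc.x: first argument is the 2-bit vcix opcode field, the next two are
     the remaining opcode fields, xs1 supplies the scalar operand.  */
  __riscv_sf_vc_x_se_u8m1 (1, 2, 3, xs1, vl);
}
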
@@ -305,3 +305,13 @@
"Shifting immediate for SIMD shufflei3."
(and (match_code "const_int")
(match_test "IN_RANGE (ival, -64, -1)")))
+
+(define_constraint "Ou01"
+ "A 1-bit unsigned immediate."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 0, 1)")))
+
+(define_constraint "Ou02"
+ "A 2-bit unsigned immediate."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 0, 3)")))
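
The two new constraints bound the vcix opcode immediates used by the sf.vc patterns further down: Ou02 accepts 0..3 (2-bit field) and Ou01 accepts 0..1 (1-bit field, used by the floating-point forms). In practice the opcode argument of the intrinsics has to be an in-range compile-time constant; a hedged sketch (intrinsic name and argument order inferred from the argument lists added later in this patch):

#include <stddef.h>
#include <sifive_vector.h>

void
opcode_fields (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl)
{
  __riscv_sf_vc_vv_se_u8m1 (3, 2, vs2, vs1, vl);  /* 3 fits the 2-bit Ou02 field.  */
  /* Passing 4 (or a non-constant) as the first argument is expected to be
     rejected when the insn's Ou02 constraint is checked.  */
}
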
@@ -141,3 +141,7 @@
(eq_attr "type" "rdvlenb,rdvl")
"vxu_ooo_issue,vxu_ooo_issue")
+;; Vector sf_vcp.
+(define_insn_reservation "vec_sf_vcp" 2
+ (eq_attr "type" "sf_vc,sf_vc_se")
+ "vxu_ooo_issue")
@@ -303,6 +303,8 @@ main (int argc, const char **argv)
fprintf (fp, " /*UNSIGNED_EEW%d_LMUL1_INTERPRET*/ %s,\n", eew,
inttype (eew, LMUL1_LOG2, /* unsigned_p */true).c_str ());
+ fprintf (fp, " /*X2*/ INVALID,\n");
+
for (unsigned lmul_log2_offset : {1, 2, 3, 4, 5, 6})
{
unsigned multiple_of_lmul = 1 << lmul_log2_offset;
@@ -426,6 +428,9 @@ main (int argc, const char **argv)
fprintf (fp, " /*UNSIGNED_EEW%d_LMUL1_INTERPRET*/ INVALID,\n",
eew);
+ fprintf (fp, " /*X2*/ %s,\n",
+ inttype (sew * 2, lmul_log2 + 1, /*unsigned_p*/ true).c_str ());
+
for (unsigned lmul_log2_offset : {1, 2, 3, 4, 5, 6})
{
unsigned multiple_of_lmul = 1 << lmul_log2_offset;
@@ -501,6 +506,8 @@ main (int argc, const char **argv)
for (unsigned eew : EEW_SIZE_LIST)
fprintf (fp, " /*UNSIGNED_EEW%d_LMUL1_INTERPRET*/ INVALID,\n", eew);
+ fprintf (fp, " /*X2*/ INVALID,\n");
+
for (unsigned lmul_log2_offset : {1, 2, 3, 4, 5, 6})
{
unsigned multiple_of_lmul = 1 << lmul_log2_offset;
@@ -588,6 +595,8 @@ main (int argc, const char **argv)
fprintf (fp, " /*UNSIGNED_EEW%d_LMUL1_INTERPRET*/ INVALID,\n",
eew);
+ fprintf (fp, " /*X2*/ INVALID,\n");
+
for (unsigned lmul_log2_offset : {1, 2, 3, 4, 5, 6})
{
unsigned multiple_of_lmul = 1 << lmul_log2_offset;
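
The generator change adds an X2 column to the type-index table: for the plain unsigned entries it resolves to the type with doubled SEW and doubled LMUL (the inttype (sew * 2, lmul_log2 + 1) call), and it stays INVALID everywhere else. That X2 type is what the widening .vvw/.xvw/.ivw/.fvw forms use for their destination. For illustration only, the same narrow-to-wide pairing as seen through a standard RVV widening intrinsic:

#include <riscv_vector.h>

/* vuint8mf8_t (SEW=8, LMUL=1/8) pairs with vuint16mf4_t (SEW=16, LMUL=1/4).  */
vuint16mf4_t
x2_of_u8mf8 (vuint8mf8_t v, size_t vl)
{
  return __riscv_vzext_vf2_u16mf4 (v, vl);
}
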
@@ -270,7 +270,8 @@ riscv_pragma_intrinsic (cpp_reader *)
const char *name = TREE_STRING_POINTER (x);
if (strcmp (name, "vector") == 0
- || strcmp (name, "xtheadvector") == 0)
+ || strcmp (name, "xtheadvector") == 0
+ || strcmp (name, "xsfvcp") == 0)
{
struct pragma_intrinsic_flags backup_flags;
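
With "xsfvcp" accepted by the intrinsic pragma, the new header can register the Xsfvcp builtins the same way riscv_vector.h registers the standard ones. Roughly what sifive_vector.h is expected to contain (a sketch only; the guard name and exact pragma usage are assumptions, not the verbatim header):

#ifndef __SIFIVE_VECTOR_H
#define __SIFIVE_VECTOR_H

#include <riscv_vector.h>

#pragma riscv intrinsic "xsfvcp"

#endif
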
@@ -1343,6 +1343,52 @@ struct sf_vfnrclip_def : public build_base
}
};
+/* sf_vcix_se_def class. */
+struct sf_vcix_se_def : public build_base
+{
+ char *get_name (function_builder &b, const function_instance &instance,
+ bool overloaded_p) const override
+ {
+ /* Return nullptr if it is overloaded. */
+ if (overloaded_p)
+ return nullptr;
+
+ b.append_base_name (instance.base_name);
+
+ /* vop --> vop<op>_se_<type>. */
+ if (!overloaded_p)
+ {
+ b.append_name (operand_suffixes[instance.op_info->op]);
+ b.append_name ("_se");
+ b.append_name (type_suffixes[instance.type.index].vector);
+ }
+ return b.finish_name ();
+ }
+};
+
+/* sf_vcix_def class. */
+struct sf_vcix_def : public build_base
+{
+ char *get_name (function_builder &b, const function_instance &instance,
+ bool overloaded_p) const override
+ {
+ /* Return nullptr if it is overloaded. */
+ if (overloaded_p)
+ return nullptr;
+
+ b.append_base_name (instance.base_name);
+
+    /* vop --> vop<op>_<type>. */
+ if (!overloaded_p)
+ {
+ b.append_name (operand_suffixes[instance.op_info->op]);
+ b.append_name (type_suffixes[instance.type.index].vector);
+ }
+ return b.finish_name ();
+ }
+};
+
SHAPE(vsetvl, vsetvl)
SHAPE(vsetvl, vsetvlmax)
SHAPE(loadstore, loadstore)
@@ -1379,4 +1425,6 @@ SHAPE(crypto_vi, crypto_vi)
SHAPE(crypto_vv_no_op_type, crypto_vv_no_op_type)
SHAPE (sf_vqmacc, sf_vqmacc)
SHAPE (sf_vfnrclip, sf_vfnrclip)
+SHAPE (sf_vcix_se, sf_vcix_se)
+SHAPE (sf_vcix, sf_vcix)
} // end namespace riscv_vector
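
The two shapes differ only in whether "_se" is inserted before the type suffix, and neither registers an overloaded name. For OP_TYPE_v_vv over vuint8m1_t the expected spellings are therefore __riscv_sf_vc_v_vv_se_u8m1 and __riscv_sf_vc_v_vv_u8m1; a hedged usage sketch (argument order inferred from sf_vc_v_vv_args below):

#include <stddef.h>
#include <sifive_vector.h>

vuint8m1_t
use_both_shapes (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl)
{
  vuint8m1_t a = __riscv_sf_vc_v_vv_se_u8m1 (1, vs2, vs1, vl); /* sf_vcix_se */
  vuint8m1_t b = __riscv_sf_vc_v_vv_u8m1 (1, a, vs1, vl);      /* sf_vcix    */
  return b;
}
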
@@ -62,6 +62,8 @@ extern const function_shape *const crypto_vv_no_op_type;
/* Sifive vendor extension. */
extern const function_shape *const sf_vqmacc;
extern const function_shape *const sf_vfnrclip;
+extern const function_shape *const sf_vcix_se;
+extern const function_shape *const sf_vcix;
}
} // end namespace riscv_vector
@@ -369,6 +369,18 @@ along with GCC; see the file COPYING3. If not see
#define DEF_RVV_XFQF_OPS(TYPE, REQUIRE)
#endif
+/* Use "DEF_RVV_X2_U_OPS" macro include unsigned integer which will
+ be iterated and registered as intrinsic functions. */
+#ifndef DEF_RVV_X2_U_OPS
+#define DEF_RVV_X2_U_OPS(TYPE, REQUIRE)
+#endif
+
+/* Use "DEF_RVV_X2_WU_OPS" macro include widen unsigned integer which will
+ be iterated and registered as intrinsic functions. */
+#ifndef DEF_RVV_X2_WU_OPS
+#define DEF_RVV_X2_WU_OPS(TYPE, REQUIRE)
+#endif
+
DEF_RVV_I_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
DEF_RVV_I_OPS (vint8mf4_t, 0)
DEF_RVV_I_OPS (vint8mf2_t, 0)
@@ -1463,6 +1475,32 @@ DEF_RVV_XFQF_OPS (vint8mf2_t, 0)
DEF_RVV_XFQF_OPS (vint8m1_t, 0)
DEF_RVV_XFQF_OPS (vint8m2_t, 0)
+DEF_RVV_X2_U_OPS (vuint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_U_OPS (vuint8mf4_t, 0)
+DEF_RVV_X2_U_OPS (vuint8mf2_t, 0)
+DEF_RVV_X2_U_OPS (vuint8m1_t, 0)
+DEF_RVV_X2_U_OPS (vuint8m2_t, 0)
+DEF_RVV_X2_U_OPS (vuint8m4_t, 0)
+DEF_RVV_X2_U_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_U_OPS (vuint16mf2_t, 0)
+DEF_RVV_X2_U_OPS (vuint16m1_t, 0)
+DEF_RVV_X2_U_OPS (vuint16m2_t, 0)
+DEF_RVV_X2_U_OPS (vuint16m4_t, 0)
+DEF_RVV_X2_U_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_U_OPS (vuint32m1_t, 0)
+DEF_RVV_X2_U_OPS (vuint32m2_t, 0)
+DEF_RVV_X2_U_OPS (vuint32m4_t, 0)
+
+DEF_RVV_X2_WU_OPS (vuint16mf4_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_WU_OPS (vuint16mf2_t, 0)
+DEF_RVV_X2_WU_OPS (vuint16m1_t, 0)
+DEF_RVV_X2_WU_OPS (vuint16m2_t, 0)
+DEF_RVV_X2_WU_OPS (vuint16m4_t, 0)
+DEF_RVV_X2_WU_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
+DEF_RVV_X2_WU_OPS (vuint32m1_t, 0)
+DEF_RVV_X2_WU_OPS (vuint32m2_t, 0)
+DEF_RVV_X2_WU_OPS (vuint32m4_t, 0)
+
#undef DEF_RVV_I_OPS
#undef DEF_RVV_U_OPS
#undef DEF_RVV_F_OPS
@@ -1519,3 +1557,5 @@ DEF_RVV_XFQF_OPS (vint8m2_t, 0)
#undef DEF_RVV_F32_OPS
#undef DEF_RVV_QMACC_OPS
#undef DEF_RVV_XFQF_OPS
+#undef DEF_RVV_X2_U_OPS
+#undef DEF_RVV_X2_WU_OPS
\ No newline at end of file
@@ -544,6 +544,20 @@ static const rvv_type_info crypto_sew64_ops[] = {
#include "riscv-vector-builtins-types.def"
{NUM_VECTOR_TYPES, 0}};
+/* A list of unsigned integer types that will be registered for SiFive
+   Xsfvcp intrinsic functions. */
+static const rvv_type_info x2_u_ops[] = {
+#define DEF_RVV_X2_U_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of widened unsigned integer types that will be registered for
+   SiFive Xsfvcp intrinsic functions. */
+static const rvv_type_info x2_wu_ops[] = {
+#define DEF_RVV_X2_WU_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
/* A list of signed integer will be registered for intrinsic
* functions. */
static const rvv_type_info qmacc_ops[] = {
@@ -805,7 +819,7 @@ static CONSTEXPR const rvv_arg_type_info bf_wwxv_args[]
static CONSTEXPR const rvv_arg_type_info m_args[]
= {rvv_arg_type_info (RVV_BASE_mask), rvv_arg_type_info_end};
-/* A list of args for vector_type func (scalar_type) function. */
+/* A list of args for vector_type func (scalar_type/sf.vc) function. */
static CONSTEXPR const rvv_arg_type_info x_args[]
= {rvv_arg_type_info (RVV_BASE_scalar), rvv_arg_type_info_end};
@@ -1055,6 +1069,161 @@ static CONSTEXPR const rvv_arg_type_info scalar_ptr_size_args[]
rvv_arg_type_info (RVV_BASE_size), rvv_arg_type_info (RVV_BASE_vector),
rvv_arg_type_info_end};
+/* A list of args for vector_type func (sf.vc.x) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_x_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.v.x) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_v_x_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.i) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_i_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.v.i) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_v_i_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.vv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_vv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.v.vv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_v_vv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.xv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_xv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.v.xv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_v_xv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.iv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_iv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.v.iv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_v_iv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.fv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_fv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_scalar_float),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.v.fv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_v_fv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_scalar_float),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.vvv/sf.vc.v.vvv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_vvv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.xvv/sf.vc.v.xvv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_xvv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.ivv/sf.vc.v.ivv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_ivv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_unsigned_vector),
+ rvv_arg_type_info (RVV_BASE_unsigned_vector),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.fvv/sf.vc.v.fvv) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_fvv_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_scalar_float),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.vvw/sf.vc.v.vvw) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_vvw_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_x2_vector),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.xvw/sf.vc.v.xvw) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_xvw_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_x2_vector),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.ivw/sf.vc.v.ivw) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_ivw_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_x2_vector),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (sf.vc.fvw/sf.vc.v.fvw) function. */
+static CONSTEXPR const rvv_arg_type_info sf_vc_fvw_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar),
+ rvv_arg_type_info (RVV_BASE_x2_vector),
+ rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_scalar_float),
+ rvv_arg_type_info_end};
+
/* A list of none preds that will be registered for intrinsic functions. */
static CONSTEXPR const predication_type_index none_preds[]
= {PRED_TYPE_none, NUM_PRED_TYPES};
@@ -3006,6 +3175,174 @@ static CONSTEXPR const rvv_op_info u_vvvv_crypto_sew64_ops
rvv_arg_type_info (RVV_BASE_vector), /* Return type */
vvv_args /* Args */};
+static CONSTEXPR const rvv_op_info sf_vc_x_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_x, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_x_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_x_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_v_x, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ sf_vc_v_x_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_i_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_i, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_i_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_i_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_v_i, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ sf_vc_v_i_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_vv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_vv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_vv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_vv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_v_vv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ sf_vc_v_vv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_xv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_xv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_xv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_xv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_v_xv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ sf_vc_v_xv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_iv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_iv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_iv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_iv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_v_iv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ sf_vc_v_iv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_fv_ops
+ = {wextu_ops, /* Types */
+ OP_TYPE_fv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_fv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_fv_ops
+ = {wextu_ops, /* Types */
+ OP_TYPE_v_fv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ sf_vc_v_fv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_vvv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_vvv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_vvv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_vvv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_v_vvv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ sf_vc_vvv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_xvv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_xvv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_xvv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_xvv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_v_xvv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ sf_vc_xvv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_ivv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_ivv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_ivv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_ivv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_v_ivv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ sf_vc_ivv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_fvv_ops
+ = {wextu_ops, /* Types */
+ OP_TYPE_fvv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_fvv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_fvv_ops
+ = {wextu_ops, /* Types */
+ OP_TYPE_v_fvv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ sf_vc_fvv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_vvw_ops
+ = {x2_u_ops, /* Types */
+ OP_TYPE_vvw, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_vvw_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_vvw_ops
+ = {x2_u_ops, /* Types */
+ OP_TYPE_v_vvw, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_x2_vector), /* Return type */
+ sf_vc_vvw_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_xvw_ops
+ = {x2_u_ops, /* Types */
+ OP_TYPE_xvw, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_xvw_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_xvw_ops
+ = {x2_u_ops, /* Types */
+ OP_TYPE_v_xvw, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_x2_vector), /* Return type */
+ sf_vc_xvw_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_ivw_ops
+ = {x2_u_ops, /* Types */
+ OP_TYPE_ivw, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_ivw_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_ivw_ops
+ = {x2_u_ops, /* Types */
+ OP_TYPE_v_ivw, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_x2_vector), /* Return type */
+ sf_vc_ivw_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_fvw_ops
+ = {x2_wu_ops, /* Types */
+ OP_TYPE_fvw, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ sf_vc_fvw_args /* Args */};
+
+static CONSTEXPR const rvv_op_info sf_vc_v_fvw_ops
+ = {x2_wu_ops, /* Types */
+ OP_TYPE_v_fvw, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_x2_vector), /* Return type */
+ sf_vc_fvw_args /* Args */};
+
/* A list of all RVV base function types. */
static CONSTEXPR const function_type_info function_types[] = {
#define DEF_RVV_TYPE_INDEX( \
@@ -3022,7 +3359,7 @@ static CONSTEXPR const function_type_info function_types[] = {
SIGNED_EEW16_LMUL1_INTERPRET, SIGNED_EEW32_LMUL1_INTERPRET, \
SIGNED_EEW64_LMUL1_INTERPRET, UNSIGNED_EEW8_LMUL1_INTERPRET, \
UNSIGNED_EEW16_LMUL1_INTERPRET, UNSIGNED_EEW32_LMUL1_INTERPRET, \
- UNSIGNED_EEW64_LMUL1_INTERPRET, X2_VLMUL_EXT, X4_VLMUL_EXT, X8_VLMUL_EXT, \
+ UNSIGNED_EEW64_LMUL1_INTERPRET, X2, X2_VLMUL_EXT, X4_VLMUL_EXT, X8_VLMUL_EXT,\
X16_VLMUL_EXT, X32_VLMUL_EXT, X64_VLMUL_EXT, TUPLE_SUBPART) \
{ \
VECTOR_TYPE_##VECTOR, \
@@ -3087,6 +3424,7 @@ static CONSTEXPR const function_type_info function_types[] = {
VECTOR_TYPE_##UNSIGNED_EEW16_LMUL1_INTERPRET, \
VECTOR_TYPE_##UNSIGNED_EEW32_LMUL1_INTERPRET, \
VECTOR_TYPE_##UNSIGNED_EEW64_LMUL1_INTERPRET, \
+ VECTOR_TYPE_##X2, \
VECTOR_TYPE_##X2_VLMUL_EXT, \
VECTOR_TYPE_##X4_VLMUL_EXT, \
VECTOR_TYPE_##X8_VLMUL_EXT, \
@@ -3600,6 +3938,24 @@ rvv_arg_type_info::get_xfqf_float_type (vector_type_index type_idx) const
return NULL_TREE;
}
+tree
+rvv_arg_type_info::get_scalar_float_type (vector_type_index type_idx) const
+{
+ /* Convert vint types to their corresponding scalar float types.
+ Note:
+ - According to riscv-vector-builtins-types.def, the index of an unsigned
+ type is always one greater than its corresponding signed type.
+ - Conversion for vint8 types is not required. */
+  if (type_idx >= VECTOR_TYPE_vint16mf4_t
+      && type_idx <= VECTOR_TYPE_vuint16m8_t)
+    return builtin_types[VECTOR_TYPE_vfloat16m1_t].scalar;
+  else if (type_idx >= VECTOR_TYPE_vint32mf2_t
+           && type_idx <= VECTOR_TYPE_vuint32m8_t)
+    return builtin_types[VECTOR_TYPE_vfloat32m1_t].scalar;
+  else if (type_idx >= VECTOR_TYPE_vint64m1_t
+           && type_idx <= VECTOR_TYPE_vuint64m8_t)
+    return builtin_types[VECTOR_TYPE_vfloat64m1_t].scalar;
+ else
+ return NULL_TREE;
+}
+
vector_type_index
rvv_arg_type_info::get_function_type_index (vector_type_index type_idx) const
{
@@ -3758,7 +4114,7 @@ function_instance::modifies_global_state_p () const
return true;
/* Handle direct modifications of global state. */
- return flags & (CP_WRITE_MEMORY | CP_WRITE_CSR);
+ return flags & (CP_WRITE_MEMORY | CP_WRITE_CSR | CP_USE_COPROCESSORS);
}
/* Return true if calls to the function could raise a signal. */
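
CP_USE_COPROCESSORS now feeds into modifies_global_state_p, so calls to the sf.vc builtins are treated as having coprocessor-visible effects: they are not CSE'd away and their relative order is preserved. A hedged illustration (intrinsic name and prototype as assumed above):

#include <stddef.h>
#include <stdint.h>
#include <sifive_vector.h>

void
two_identical_ops (uint8_t xs1, size_t vl)
{
  /* Both calls are expected to survive optimization: the builtin is flagged
     as modifying global (coprocessor) state via CP_USE_COPROCESSORS.  */
  __riscv_sf_vc_x_se_u8m1 (1, 2, 3, xs1, vl);
  __riscv_sf_vc_x_se_u8m1 (1, 2, 3, xs1, vl);
}
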
@@ -82,7 +82,7 @@ along with GCC; see the file COPYING3. If not see
SIGNED_EEW16_LMUL1_INTERPRET, SIGNED_EEW32_LMUL1_INTERPRET, \
SIGNED_EEW64_LMUL1_INTERPRET, UNSIGNED_EEW8_LMUL1_INTERPRET, \
UNSIGNED_EEW16_LMUL1_INTERPRET, UNSIGNED_EEW32_LMUL1_INTERPRET, \
- UNSIGNED_EEW64_LMUL1_INTERPRET, X2_VLMUL_EXT, X4_VLMUL_EXT, X8_VLMUL_EXT, \
+ UNSIGNED_EEW64_LMUL1_INTERPRET, X2, X2_VLMUL_EXT, X4_VLMUL_EXT, X8_VLMUL_EXT,\
X16_VLMUL_EXT, X32_VLMUL_EXT, X64_VLMUL_EXT, TUPLE_SUBPART)
#endif
@@ -637,6 +637,32 @@ DEF_RVV_OP_TYPE (xu_w)
DEF_RVV_OP_TYPE (s)
DEF_RVV_OP_TYPE (4x8x4)
DEF_RVV_OP_TYPE (2x8x2)
+DEF_RVV_OP_TYPE (v_x)
+DEF_RVV_OP_TYPE (i)
+DEF_RVV_OP_TYPE (v_i)
+DEF_RVV_OP_TYPE (xv)
+DEF_RVV_OP_TYPE (iv)
+DEF_RVV_OP_TYPE (fv)
+DEF_RVV_OP_TYPE (vvv)
+DEF_RVV_OP_TYPE (xvv)
+DEF_RVV_OP_TYPE (ivv)
+DEF_RVV_OP_TYPE (fvv)
+DEF_RVV_OP_TYPE (vvw)
+DEF_RVV_OP_TYPE (xvw)
+DEF_RVV_OP_TYPE (ivw)
+DEF_RVV_OP_TYPE (fvw)
+DEF_RVV_OP_TYPE (v_vv)
+DEF_RVV_OP_TYPE (v_xv)
+DEF_RVV_OP_TYPE (v_iv)
+DEF_RVV_OP_TYPE (v_fv)
+DEF_RVV_OP_TYPE (v_vvv)
+DEF_RVV_OP_TYPE (v_xvv)
+DEF_RVV_OP_TYPE (v_ivv)
+DEF_RVV_OP_TYPE (v_fvv)
+DEF_RVV_OP_TYPE (v_vvw)
+DEF_RVV_OP_TYPE (v_xvw)
+DEF_RVV_OP_TYPE (v_ivw)
+DEF_RVV_OP_TYPE (v_fvw)
DEF_RVV_PRED_TYPE (ta)
DEF_RVV_PRED_TYPE (tu)
@@ -720,6 +746,7 @@ DEF_RVV_BASE_TYPE (unsigned_eew8_lmul1_interpret, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (unsigned_eew16_lmul1_interpret, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (unsigned_eew32_lmul1_interpret, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (unsigned_eew64_lmul1_interpret, get_vector_type (type_idx))
+DEF_RVV_BASE_TYPE (x2_vector, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (vlmul_ext_x2, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (vlmul_ext_x4, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (vlmul_ext_x8, get_vector_type (type_idx))
@@ -729,6 +756,7 @@ DEF_RVV_BASE_TYPE (vlmul_ext_x64, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (size_ptr, build_pointer_type (size_type_node))
DEF_RVV_BASE_TYPE (tuple_subpart, get_tuple_subpart_type (type_idx))
DEF_RVV_BASE_TYPE (xfqf_float, get_xfqf_float_type (type_idx))
+DEF_RVV_BASE_TYPE (scalar_float, get_scalar_float_type (type_idx))
DEF_RVV_VXRM_ENUM (RNU, VXRM_RNU)
DEF_RVV_VXRM_ENUM (RNE, VXRM_RNE)
@@ -130,6 +130,7 @@ enum required_ext
XSFVQMACCQOQ_EXT, /* XSFVQMACCQOQ extension */
XSFVQMACCDOD_EXT, /* XSFVQMACCDOD extension */
XSFVFNRCLIPXFQF_EXT, /* XSFVFNRCLIPXFQF extension */
+ XSFVCP_EXT, /* XSFVCP extension */
/* Please update below to isa_name func when add or remove enum type(s). */
};
@@ -169,6 +170,8 @@ static inline const char * required_ext_to_isa_name (enum required_ext required)
return "xsfvqmaccdod";
case XSFVFNRCLIPXFQF_EXT:
return "xsfvfnrclipxfqf";
+ case XSFVCP_EXT:
+ return "xsfvcp";
default:
gcc_unreachable ();
}
@@ -212,6 +215,8 @@ static inline bool required_extensions_specified (enum required_ext required)
return TARGET_XSFVQMACCDOD;
case XSFVFNRCLIPXFQF_EXT:
return TARGET_XSFVFNRCLIPXFQF;
+ case XSFVCP_EXT:
+ return TARGET_XSFVCP;
default:
gcc_unreachable ();
}
@@ -297,6 +302,7 @@ struct rvv_arg_type_info
tree get_tree_type (vector_type_index) const;
tree get_tuple_subpart_type (vector_type_index) const;
tree get_xfqf_float_type (vector_type_index) const;
+ tree get_scalar_float_type (vector_type_index) const;
};
/* Static information for each operand. */
@@ -359,6 +365,8 @@ struct function_group_info
return TARGET_XSFVQMACCDOD;
case XSFVFNRCLIPXFQF_EXT:
return TARGET_XSFVFNRCLIPXFQF;
+ case XSFVCP_EXT:
+ return TARGET_XSFVCP;
default:
gcc_unreachable ();
}
@@ -482,6 +482,8 @@
;; SiFive custom extension instrctions
;; sf_vqmacc vector matrix integer multiply-add instructions
;; sf_vfnrclip vector fp32 to int8 ranged clip instructions
+;; sf_vc vector coprocessor interface without side effect
+;; sf_vc_se vector coprocessor interface with side effect
(define_attr "type"
"unknown,branch,jump,jalr,ret,call,load,fpload,store,fpstore,
mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
@@ -503,7 +505,8 @@
vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,
vgather,vcompress,vmov,vector,vandn,vbrev,vbrev8,vrev8,vclz,vctz,vcpop,vrol,vror,vwsll,
vclmul,vclmulh,vghsh,vgmul,vaesef,vaesem,vaesdf,vaesdm,vaeskf1,vaeskf2,vaesz,
- vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c,vfncvtbf16,vfwcvtbf16,vfwmaccbf16"
+ vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c,vfncvtbf16,vfwcvtbf16,vfwmaccbf16,
+ sf_vc,sf_vc_se"
(cond [(eq_attr "got" "load") (const_string "load")
;; If a doubleword move uses these expensive instructions,
@@ -195,12 +195,89 @@ public:
}
};
+/* Implements SiFive sf.vc. */
+class sf_vc : public function_base
+{
+public:
+
+ unsigned int call_properties (const function_instance &) const override
+ {
+ return CP_USE_COPROCESSORS;
+ }
+
+ rtx expand (function_expander &e) const override
+ {
+ switch (e.op_info->op)
+ {
+ case OP_TYPE_x:
+ return e.use_exact_insn (code_for_sf_vc_x_se (e.vector_mode ()));
+ case OP_TYPE_i:
+ return e.use_exact_insn (code_for_sf_vc_i_se (e.vector_mode ()));
+ case OP_TYPE_vv:
+ return e.use_exact_insn (code_for_sf_vc_vv_se (e.vector_mode ()));
+ case OP_TYPE_xv:
+ return e.use_exact_insn (code_for_sf_vc_xv_se (e.vector_mode ()));
+ case OP_TYPE_iv:
+ return e.use_exact_insn (code_for_sf_vc_iv_se (e.vector_mode ()));
+ case OP_TYPE_fv:
+ return e.use_exact_insn (code_for_sf_vc_fv_se (e.vector_mode ()));
+ case OP_TYPE_v_x:
+ return e.use_exact_insn (code_for_sf_vc_v_x_se (e.vector_mode ()));
+ case OP_TYPE_v_i:
+ return e.use_exact_insn (code_for_sf_vc_v_i_se (e.vector_mode ()));
+ case OP_TYPE_v_vv:
+ return e.use_exact_insn (code_for_sf_vc_v_vv_se (e.vector_mode ()));
+ case OP_TYPE_v_xv:
+ return e.use_exact_insn (code_for_sf_vc_v_xv_se (e.vector_mode ()));
+ case OP_TYPE_v_iv:
+ return e.use_exact_insn (code_for_sf_vc_v_iv_se (e.vector_mode ()));
+ case OP_TYPE_v_fv:
+ return e.use_exact_insn (code_for_sf_vc_v_fv_se (e.vector_mode ()));
+ case OP_TYPE_vvv:
+ return e.use_exact_insn (code_for_sf_vc_vvv_se (e.vector_mode ()));
+ case OP_TYPE_xvv:
+ return e.use_exact_insn (code_for_sf_vc_xvv_se (e.vector_mode ()));
+ case OP_TYPE_ivv:
+ return e.use_exact_insn (code_for_sf_vc_ivv_se (e.vector_mode ()));
+ case OP_TYPE_fvv:
+ return e.use_exact_insn (code_for_sf_vc_fvv_se (e.vector_mode ()));
+ case OP_TYPE_vvw:
+ return e.use_exact_insn (code_for_sf_vc_vvw_se (e.vector_mode ()));
+ case OP_TYPE_xvw:
+ return e.use_exact_insn (code_for_sf_vc_xvw_se (e.vector_mode ()));
+ case OP_TYPE_ivw:
+ return e.use_exact_insn (code_for_sf_vc_ivw_se (e.vector_mode ()));
+ case OP_TYPE_fvw:
+ return e.use_exact_insn (code_for_sf_vc_fvw_se (e.vector_mode ()));
+ case OP_TYPE_v_vvv:
+ return e.use_exact_insn (code_for_sf_vc_v_vvv_se (e.vector_mode ()));
+ case OP_TYPE_v_xvv:
+ return e.use_exact_insn (code_for_sf_vc_v_xvv_se (e.vector_mode ()));
+ case OP_TYPE_v_ivv:
+ return e.use_exact_insn (code_for_sf_vc_v_ivv_se (e.vector_mode ()));
+ case OP_TYPE_v_fvv:
+ return e.use_exact_insn (code_for_sf_vc_v_fvv_se (e.vector_mode ()));
+ case OP_TYPE_v_vvw:
+ return e.use_exact_insn (code_for_sf_vc_v_vvw_se (e.vector_mode ()));
+ case OP_TYPE_v_xvw:
+ return e.use_exact_insn (code_for_sf_vc_v_xvw_se (e.vector_mode ()));
+ case OP_TYPE_v_ivw:
+ return e.use_exact_insn (code_for_sf_vc_v_ivw_se (e.vector_mode ()));
+ case OP_TYPE_v_fvw:
+ return e.use_exact_insn (code_for_sf_vc_v_fvw_se (e.vector_mode ()));
+ default:
+ gcc_unreachable ();
+ }
+ }
+};
+
static CONSTEXPR const sf_vqmacc sf_vqmacc_obj;
static CONSTEXPR const sf_vqmaccu sf_vqmaccu_obj;
static CONSTEXPR const sf_vqmaccsu sf_vqmaccsu_obj;
static CONSTEXPR const sf_vqmaccus sf_vqmaccus_obj;
static CONSTEXPR const sf_vfnrclip_x_f_qf<UNSPEC_SF_VFNRCLIP> sf_vfnrclip_x_f_qf_obj;
static CONSTEXPR const sf_vfnrclip_xu_f_qf<UNSPEC_SF_VFNRCLIPU> sf_vfnrclip_xu_f_qf_obj;
+static CONSTEXPR const sf_vc sf_vc_obj;
/* Declare the function base NAME, pointing it to an instance
of class <NAME>_obj. */
@@ -213,4 +290,5 @@ BASE (sf_vqmaccsu)
BASE (sf_vqmaccus)
BASE (sf_vfnrclip_x_f_qf)
BASE (sf_vfnrclip_xu_f_qf)
+BASE (sf_vc)
} // end namespace riscv_vector
@@ -23,6 +23,8 @@
namespace riscv_vector {
+static const unsigned int CP_USE_COPROCESSORS = 1U << 6;
+
namespace bases {
extern const function_base *const sf_vqmacc;
extern const function_base *const sf_vqmaccu;
@@ -30,6 +32,7 @@ extern const function_base *const sf_vqmaccsu;
extern const function_base *const sf_vqmaccus;
extern const function_base *const sf_vfnrclip_x_f_qf;
extern const function_base *const sf_vfnrclip_xu_f_qf;
+extern const function_base *const sf_vc;
}
} // end namespace riscv_vector
@@ -55,4 +55,49 @@ DEF_RVV_FUNCTION (sf_vfnrclip_x_f_qf, sf_vfnrclip, full_preds, i_clip_qf_ops)
DEF_RVV_FUNCTION (sf_vfnrclip_xu_f_qf, sf_vfnrclip, full_preds, u_clip_qf_ops)
#undef REQUIRED_EXTENSIONS
+#define REQUIRED_EXTENSIONS XSFVCP_EXT
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_x_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_i_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_vv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_xv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_iv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_fv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_vvv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_xvv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_ivv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_fvv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_vvw_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_xvw_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_ivw_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_fvw_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_x_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_i_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_vv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_xv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_iv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_fv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_vvv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_xvv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_ivv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_fvv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_vvw_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_xvw_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_ivw_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix_se, none_preds, sf_vc_v_fvw_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_x_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_i_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_vv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_xv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_iv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_fv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_vvv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_xvv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_ivv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_fvv_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_vvw_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_xvw_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_ivw_ops)
+DEF_RVV_FUNCTION (sf_vc, sf_vcix, none_preds, sf_vc_v_fvw_ops)
+#undef REQUIRED_EXTENSIONS
+
#undef DEF_RVV_FUNCTION
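
The .vvw/.xvw/.ivw/.fvw entries are registered over x2_u_ops (and x2_wu_ops for the float forms), so the destination, and the return value of the v_ variants, uses the X2 (double SEW, double LMUL) type wired up earlier. A hedged sketch of a widening form; the name and argument order are inferred from sf_vc_vvw_args, not quoted from the SiFive documentation:

#include <stddef.h>
#include <sifive_vector.h>

vuint16mf4_t
widening_custom_op (vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1,
                    size_t vl)
{
  /* Sources are u8 at LMUL=1/8; the destination is the X2 counterpart,
     u16 at LMUL=1/4.  */
  return __riscv_sf_vc_v_vvw_se_u8mf8 (1, vd, vs2, vs1, vl);
}
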
@@ -182,3 +182,874 @@
"sf.vfnrclip.x<v_su>.f.qf\t%0,%3,%4%p1"
[(set_attr "type" "sf_vfnrclip")
(set_attr "mode" "<MODE>")])
+
+;; SF_VCP
+(define_insn "@sf_vc_x_se<mode>"
+ [(unspec_volatile:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 1 "const_int_operand" "Ou02")
+ (match_operand:SI 2 "const_int_operand" "K")
+ (match_operand:SI 3 "const_int_operand" "K")
+ (match_operand:<VEL> 4 "register_operand" "r")] UNSPEC_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.x\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_x_se<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=vr,vr")
+ (if_then_else:VFULLI
+ (unspec_volatile:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:SI 4 "const_int_operand" "K,K")
+ (match_operand:<VEL> 5 "register_operand" "r,r")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.x\t%3,%4,%0,%5"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_x<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=vr,vr")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:SI 4 "const_int_operand" "K,K")
+ (match_operand:<VEL> 5 "register_operand" "r,r")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.x\t%3,%4,%0,%5"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_i_se<mode>"
+ [(unspec_volatile:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 1 "const_int_operand" "Ou02")
+ (match_operand:SI 2 "const_int_operand" "K")
+ (match_operand:SI 3 "const_int_operand" "K")
+ (match_operand:SI 4 "const_int_operand" "P")] UNSPEC_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.i\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_i_se<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=vr,vr")
+ (if_then_else:VFULLI
+ (unspec_volatile:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:SI 4 "const_int_operand" "K,K")
+ (match_operand:SI 5 "const_int_operand" "P,P")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.i\t%3,%4,%0,%5"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_i<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=vr,vr")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:SI 4 "const_int_operand" "K,K")
+ (match_operand:SI 5 "const_int_operand" "P,P")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.i\t%3,%4,%0,%5"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_vv_se<mode>"
+ [(unspec_volatile:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 1 "const_int_operand" "Ou02")
+ (match_operand:SI 2 "const_int_operand" "K")
+ (match_operand:VFULLI 3 "register_operand" "vr")
+ (match_operand:VFULLI 4 "register_operand" "vr")] UNSPEC_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.vv\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_vv_se<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vr,vr")
+ (if_then_else:VFULLI
+ (unspec_volatile:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vr,vr")
+ (match_operand:VFULLI 5 "register_operand" "vr,vr")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.vv\t%3,%0,%4,%5"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_vv<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vr,vr")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vr,vr")
+ (match_operand:VFULLI 5 "register_operand" "vr,vr")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.vv\t%3,%0,%4,%5"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_xv_se<mode>"
+ [(unspec_volatile:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 1 "const_int_operand" "Ou02")
+ (match_operand:SI 2 "const_int_operand" "K")
+ (match_operand:VFULLI 3 "register_operand" "vr")
+ (match_operand:<VEL> 4 "register_operand" "r")] UNSPEC_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.xv\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_xv_se<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vd,vd")
+ (if_then_else:VFULLI
+ (unspec_volatile:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vr,vr")
+ (match_operand:<VEL> 5 "register_operand" "r,r")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.xv\t%3,%0,%4,%5"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_xv<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vd,vd")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vr,vr")
+ (match_operand:<VEL> 5 "register_operand" "r,r")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.xv\t%3,%0,%4,%5"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_iv_se<mode>"
+ [(unspec_volatile:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 1 "const_int_operand" "Ou02")
+ (match_operand:SI 2 "const_int_operand" "K")
+ (match_operand:VFULLI 3 "register_operand" "vr")
+ (match_operand:SI 4 "const_int_operand" "P")] UNSPEC_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.iv\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_iv_se<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vd,vd")
+ (if_then_else:VFULLI
+ (unspec_volatile:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vr,vr")
+ (match_operand:SI 5 "const_int_operand" "P,P")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.iv\t%3,%0,%4,%5"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_iv<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vd,vd")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vr,vr")
+ (match_operand:SI 5 "const_int_operand" "P,P")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.iv\t%3,%0,%4,%5"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_fv_se<mode>"
+ [(unspec_volatile:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:SF_FV
+ [(match_operand:SI 1 "const_int_operand" "Ou01")
+ (match_operand:SI 2 "const_int_operand" "K")
+ (match_operand:SF_FV 3 "register_operand" "vr")
+ (match_operand:<SF_XF> 4 "register_operand" "f")] UNSPEC_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.fv\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_fv_se<mode>"
+ [(set (match_operand:SF_FV 0 "register_operand" "=&vd,vd")
+ (if_then_else:SF_FV
+ (unspec_volatile:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:SF_FV
+ [(match_operand:SI 3 "const_int_operand" "Ou01,Ou01")
+ (match_operand:SF_FV 4 "register_operand" "vr,vr")
+ (match_operand:<SF_XF> 5 "register_operand" "f,f")] UNSPEC_SF_CV)
+ (match_operand:SF_FV 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.fv\t%3,%0,%4,%5"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_fv<mode>"
+ [(set (match_operand:SF_FV 0 "register_operand" "=&vd,vd")
+ (if_then_else:SF_FV
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 6 "vector_length_operand" " rK, rK")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:SF_FV
+ [(match_operand:SI 3 "const_int_operand" "Ou01,Ou01")
+ (match_operand:SF_FV 4 "register_operand" "vr,vr")
+ (match_operand:<SF_XF> 5 "register_operand" "f,f")] UNSPEC_SF_CV)
+ (match_operand:SF_FV 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.fv\t%3,%0,%4,%5"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_vvv_se<mode>"
+ [(unspec_volatile:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 1 "const_int_operand" "Ou02")
+ (match_operand:VFULLI 2 "register_operand" "vd")
+ (match_operand:VFULLI 3 "register_operand" "vr")
+ (match_operand:VFULLI 4 "register_operand" "vr")] UNSPEC_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.vvv\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_vvv_se<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vr,vr")
+ (if_then_else:VFULLI
+ (unspec_volatile:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vd,vd")
+ (match_operand:VFULLI 5 "register_operand" "vr,vr")
+ (match_operand:VFULLI 6 "register_operand" "vr,vr")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.vvv\t%3,%4,%6,%5"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_vvv<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vr,vr")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vd,vd")
+ (match_operand:VFULLI 5 "register_operand" "vr,vr")
+ (match_operand:VFULLI 6 "register_operand" "vr,vr")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.vvv\t%3,%4,%6,%5"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_xvv_se<mode>"
+ [(unspec_volatile:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 1 "const_int_operand" "Ou02")
+ (match_operand:VFULLI 2 "register_operand" "vd")
+ (match_operand:VFULLI 3 "register_operand" "vr")
+ (match_operand:<VEL> 4 "register_operand" "r")] UNSPEC_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.xvv\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_xvv_se<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vr,vr")
+ (if_then_else:VFULLI
+ (unspec_volatile:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vd,vd")
+ (match_operand:VFULLI 5 "register_operand" "vr,vr")
+ (match_operand:<VEL> 6 "register_operand" "r,r")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.xvv\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_xvv<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vr,vr")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vd,vd")
+ (match_operand:VFULLI 5 "register_operand" "vr,vr")
+ (match_operand:<VEL> 6 "register_operand" "r,r")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.xvv\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_ivv_se<mode>"
+ [(unspec_volatile:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 1 "const_int_operand" "Ou02")
+ (match_operand:VFULLI 2 "register_operand" "vd")
+ (match_operand:VFULLI 3 "register_operand" "vr")
+ (match_operand:SI 4 "const_int_operand" "P")] UNSPEC_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.ivv\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_ivv_se<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vr,vr")
+ (if_then_else:VFULLI
+ (unspec_volatile:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vd,vd")
+ (match_operand:VFULLI 5 "register_operand" "vr,vr")
+ (match_operand:SI 6 "const_int_operand" "P,P")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.ivv\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_ivv<mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=&vr,vr")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VFULLI
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:VFULLI 4 "register_operand" "vd,vd")
+ (match_operand:VFULLI 5 "register_operand" "vr,vr")
+ (match_operand:SI 6 "const_int_operand" "P,P")] UNSPEC_SF_CV)
+ (match_operand:VFULLI 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.ivv\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_fvv_se<mode>"
+ [(unspec_volatile:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:SF_FV
+ [(match_operand:SI 1 "const_int_operand" "Ou01")
+ (match_operand:SF_FV 2 "register_operand" "vd")
+ (match_operand:SF_FV 3 "register_operand" "vr")
+ (match_operand:<SF_XF> 4 "register_operand" "f")] UNSPEC_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.fvv\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_fvv_se<mode>"
+ [(set (match_operand:SF_FV 0 "register_operand" "=&vr,vr")
+ (if_then_else:SF_FV
+ (unspec_volatile:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:SF_FV
+ [(match_operand:SI 3 "const_int_operand" "Ou01,Ou01")
+ (match_operand:SF_FV 4 "register_operand" "vd,vd")
+ (match_operand:SF_FV 5 "register_operand" "vr,vr")
+ (match_operand:<SF_XF> 6 "register_operand" "f,f")] UNSPEC_SF_CV)
+ (match_operand:SF_FV 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.fvv\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_fvv<mode>"
+ [(set (match_operand:SF_FV 0 "register_operand" "=&vr,vr")
+ (if_then_else:SF_FV
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:SF_FV
+ [(match_operand:SI 3 "const_int_operand" "Ou01,Ou01")
+ (match_operand:SF_FV 4 "register_operand" "vd,vd")
+ (match_operand:SF_FV 5 "register_operand" "vr,vr")
+ (match_operand:<SF_XF> 6 "register_operand" "f,f")] UNSPEC_SF_CV)
+ (match_operand:SF_FV 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.fvv\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
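+;; Widening forms (sf.vc.*vw, sf.vc.v.*vw): the destination vector has twice the SEW of the source vector.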
+(define_insn "@sf_vc_vvw_se<mode>"
+ [(unspec_volatile:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:<SF_VW>
+ [(match_operand:SI 1 "const_int_operand" "Ou02")
+ (match_operand:<SF_VW> 2 "register_operand" "vd")
+ (match_operand:SF_VC_W 3 "register_operand" "vr")
+ (match_operand:SF_VC_W 4 "register_operand" "vr")] UNSPEC_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.vvw\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_vvw_se<mode>"
+ [(set (match_operand:<SF_VW> 0 "register_operand" "=&vr,vr")
+ (if_then_else:<SF_VW>
+ (unspec_volatile:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:<SF_VW>
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:<SF_VW> 4 "register_operand" "vd,vd")
+ (match_operand:SF_VC_W 5 "register_operand" "vr,vr")
+ (match_operand:SF_VC_W 6 "register_operand" "vr,vr")] UNSPEC_SF_CV)
+ (match_operand:<SF_VW> 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.vvw\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_vvw<mode>"
+ [(set (match_operand:<SF_VW> 0 "register_operand" "=&vr,vr")
+ (if_then_else:<SF_VW>
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:<SF_VW>
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:<SF_VW> 4 "register_operand" "vd,vd")
+ (match_operand:SF_VC_W 5 "register_operand" "vr,vr")
+ (match_operand:SF_VC_W 6 "register_operand" "vr,vr")] UNSPEC_SF_CV)
+ (match_operand:<SF_VW> 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.vvw\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_xvw_se<mode>"
+ [(unspec_volatile:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:<SF_VW>
+ [(match_operand:SI 1 "const_int_operand" "Ou02")
+ (match_operand:<SF_VW> 2 "register_operand" "vd")
+ (match_operand:SF_VC_W 3 "register_operand" "vr")
+ (match_operand:<VEL> 4 "register_operand" "r")] UNSPEC_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.xvw\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_xvw_se<mode>"
+ [(set (match_operand:<SF_VW> 0 "register_operand" "=&vr,vr")
+ (if_then_else:<SF_VW>
+ (unspec_volatile:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:<SF_VW>
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:<SF_VW> 4 "register_operand" "vd,vd")
+ (match_operand:SF_VC_W 5 "register_operand" "vr,vr")
+ (match_operand:<VEL> 6 "register_operand" "r,r")] UNSPEC_SF_CV)
+ (match_operand:<SF_VW> 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.xvw\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_xvw<mode>"
+ [(set (match_operand:<SF_VW> 0 "register_operand" "=&vr,vr")
+ (if_then_else:<SF_VW>
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:<SF_VW>
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:<SF_VW> 4 "register_operand" "vd,vd")
+ (match_operand:SF_VC_W 5 "register_operand" "vr,vr")
+ (match_operand:<VEL> 6 "register_operand" "r,r")] UNSPEC_SF_CV)
+ (match_operand:<SF_VW> 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.xvw\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_ivw_se<mode>"
+ [(unspec_volatile:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:<SF_VW>
+ [(match_operand:SI 1 "const_int_operand" "Ou02")
+ (match_operand:<SF_VW> 2 "register_operand" "vd")
+ (match_operand:SF_VC_W 3 "register_operand" "vr")
+ (match_operand:SI 4 "immediate_operand" "P")] UNSPEC_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.ivw\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_ivw_se<mode>"
+ [(set (match_operand:<SF_VW> 0 "register_operand" "=&vr,vr")
+ (if_then_else:<SF_VW>
+ (unspec_volatile:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:<SF_VW>
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:<SF_VW> 4 "register_operand" "vd,vd")
+ (match_operand:SF_VC_W 5 "register_operand" "vr,vr")
+ (match_operand:SI 6 "immediate_operand" "P,P")] UNSPEC_SF_CV)
+ (match_operand:<SF_VW> 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.ivw\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_ivw<mode>"
+ [(set (match_operand:<SF_VW> 0 "register_operand" "=&vr,vr")
+ (if_then_else:<SF_VW>
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:<SF_VW>
+ [(match_operand:SI 3 "const_int_operand" "Ou02,Ou02")
+ (match_operand:<SF_VW> 4 "register_operand" "vd,vd")
+ (match_operand:SF_VC_W 5 "register_operand" "vr,vr")
+ (match_operand:SI 6 "immediate_operand" "P,P")] UNSPEC_SF_CV)
+ (match_operand:<SF_VW> 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.ivw\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_fvw_se<mode>"
+ [(unspec_volatile:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" " Wc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:<SF_VW>
+ [(match_operand:SI 1 "const_int_operand" "Ou01")
+ (match_operand:<SF_VW> 2 "register_operand" "vd")
+ (match_operand:SF_VC_FW 3 "register_operand" "vr")
+ (match_operand:<SF_XFW> 4 "register_operand" "f")] UNSPEC_SF_CV)]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.fvw\t%1,%2,%3,%4"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_fvw_se<mode>"
+ [(set (match_operand:<SF_VW> 0 "register_operand" "=&vr,vr")
+ (if_then_else:<SF_VW>
+ (unspec_volatile:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec_volatile:<SF_VW>
+ [(match_operand:SI 3 "const_int_operand" "Ou01,Ou01")
+ (match_operand:<SF_VW> 4 "register_operand" "vd,vd")
+ (match_operand:SF_VC_FW 5 "register_operand" "vr,vr")
+ (match_operand:<SF_XFW> 6 "register_operand" "f,f")] UNSPEC_SF_CV)
+ (match_operand:<SF_VW> 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.fvw\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc_se")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "@sf_vc_v_fvw<mode>"
+ [(set (match_operand:<SF_VW> 0 "register_operand" "=&vr,vr")
+ (if_then_else:<SF_VW>
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " Wc1,Wc1")
+ (match_operand 7 "vector_length_operand" " rK, rK")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (match_operand 10 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:<SF_VW>
+ [(match_operand:SI 3 "const_int_operand" "Ou01,Ou01")
+ (match_operand:<SF_VW> 4 "register_operand" "vd,vd")
+ (match_operand:SF_VC_FW 5 "register_operand" "vr,vr")
+ (match_operand:<SF_XFW> 6 "register_operand" "f,f")] UNSPEC_SF_CV)
+ (match_operand:<SF_VW> 2 "vector_merge_operand" "vu,vu")))]
+ "TARGET_VECTOR && TARGET_XSFVCP"
+ "sf.vc.v.fvw\t%3,%4,%5,%6"
+ [(set_attr "type" "sf_vc")
+ (set_attr "mode" "<MODE>")])
new file mode 100644
@@ -0,0 +1,47 @@
+/* RISC-V SiFive custom 'V' Extension intrinsics include file.
+ Copyright (C) 2024-2025 Free Software Foundation, Inc.
+ Contributed by SiFive and PLCT Lab.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+
+#ifndef _SIFIVE_VECTOR_H_
+#define _SIFIVE_VECTOR_H_
+
+#include <stdint.h>
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* NOTE: This implementation of sifive_vector.h is intentionally short. It does
+ not define the RVV types and intrinsic functions directly in C and C++
+ code, but instead uses the following pragma to tell GCC to insert the
+ necessary type and function definitions itself. The net effect is the
+ same, and the file is a complete implementation of sifive_vector.h. */
+#pragma riscv intrinsic "xsfvcp"
+
+#ifdef __cplusplus
+}
+#endif // __cplusplus
+#endif // _SIFIVE_VECTOR_H_
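
A minimal usage sketch of the new header, for illustration only; it is not part of the patch. The intrinsic name __riscv_sf_vc_v_xvv_se_u8m1 and its argument order (2-bit opcode selector, vd, vs2, scalar, vl) are assumptions based on the SiFive XSfvcp intrinsic naming convention, and the build is assumed to use an -march string that enables xsfvcp, for example rv64gcv_xsfvcp.

/* Sketch only: the intrinsic name and signature below are assumed, not
   spelled out in the header itself; the pragma makes GCC register the
   types and functions internally.  */
#include <sifive_vector.h>

vuint8m1_t
use_vcix (vuint8m1_t vd, vuint8m1_t vs2, uint8_t xs1, size_t vl)
{
  /* Maps onto the sf.vc.v.xvv pattern: a 2-bit opcode selector (0..3),
     the vd and vs2 vector operands, an X-register scalar, and the
     active vector length.  */
  return __riscv_sf_vc_v_xvv_se_u8m1 (3, vd, vs2, xs1, vl);
}
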
@@ -106,6 +106,7 @@
UNSPEC_SF_VFNRCLIP
UNSPEC_SF_VFNRCLIPU
+ UNSPEC_SF_CV
])
(define_c_enum "unspecv" [
@@ -4814,3 +4815,50 @@
 (RVVM1QI "rvvm4sf")
 (RVVM2QI "rvvm8sf")
 ])
+
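+;; Source-vector modes for the widening sf.vc.*vw forms (SEW <= 32, LMUL <= 4 so the double-SEW result fits).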
+(define_mode_iterator SF_VC_W [
+ RVVM4QI RVVM2QI RVVM1QI RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
+ RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+])
+
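+;; Maps each SF_VC_W mode to its double-SEW, double-LMUL result mode.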
+(define_mode_attr SF_VW [
+ (RVVM4QI "RVVM8HI") (RVVM2QI "RVVM4HI") (RVVM1QI "RVVM2HI") (RVVMF2QI "RVVM1HI")
+ (RVVMF4QI "RVVMF2HI") (RVVMF8QI "RVVMF4HI")
+ (RVVM4HI "RVVM8SI") (RVVM2HI "RVVM4SI") (RVVM1HI "RVVM2SI") (RVVMF2HI "RVVM1SI")
+ (RVVMF4HI "RVVMF2SI")
+ (RVVM4SI "RVVM8DI") (RVVM2SI "RVVM4DI") (RVVM1SI "RVVM2DI") (RVVMF2SI "RVVM1DI")
+])
+
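+;; Lower-case counterpart of SF_VW.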
+(define_mode_attr sf_vw [
+ (RVVM4QI "rvvm8hi") (RVVM2QI "rvvm4hi") (RVVM1QI "rvvm2hi") (RVVMF2QI "rvvm1hi")
+ (RVVMF4QI "rvvmf2hi") (RVVMF8QI "rvvmf4hi")
+ (RVVM4HI "rvvm8si") (RVVM2HI "rvvm4si") (RVVM1HI "rvvm2si") (RVVMF2HI "rvvm1si")
+ (RVVMF4HI "rvvmf2si")
+ (RVVM4SI "rvvm8di") (RVVM2SI "rvvm4di") (RVVM1SI "rvvm2di") (RVVMF2SI "rvvm1di")
+])
+
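+;; Vector modes for the sf.vc.fvv forms; SEW >= 16 so a matching FP scalar mode exists.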
+(define_mode_iterator SF_FV [
+ RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+ (RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
+ (RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
+])
+
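+;; Source-vector modes for the widening sf.vc.fvw forms.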
+(define_mode_iterator SF_VC_FW [
+ RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
+ RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+])
+
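+;; FP scalar mode with the same width as the vector element.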
+(define_mode_attr SF_XF [
+ (RVVM8HI "HF") (RVVM4HI "HF") (RVVM2HI "HF") (RVVM1HI "HF") (RVVMF2HI "HF") (RVVMF4HI "HF")
+ (RVVM8SI "SF") (RVVM4SI "SF") (RVVM2SI "SF") (RVVM1SI "SF") (RVVMF2SI "SF")
+ (RVVM8DI "DF") (RVVM4DI "DF") (RVVM2DI "DF") (RVVM1DI "DF")
+])
+
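+;; As SF_XF, but for the narrow source modes of the widening FP forms.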
+(define_mode_attr SF_XFW [
+ (RVVM4HI "HF") (RVVM2HI "HF") (RVVM1HI "HF") (RVVMF2HI "HF") (RVVMF4HI "HF")
+ (RVVM4SI "SF") (RVVM2SI "SF") (RVVM1SI "SF") (RVVMF2SI "SF")
+])
@@ -56,8 +56,7 @@
vssegtux,vssegtox,vlsegdff,vandn,vbrev,vbrev8,vrev8,vcpop,vclz,vctz,vrol,\
vror,vwsll,vclmul,vclmulh,vghsh,vgmul,vaesef,vaesem,vaesdf,vaesdm,\
vaeskf1,vaeskf2,vaesz,vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c,\
- vfncvtbf16,vfwcvtbf16,vfwmaccbf16,\
- sf_vqmacc,sf_vfnrclip")
+ vfncvtbf16,vfwcvtbf16,vfwmaccbf16,sf_vqmacc,sf_vfnrclip,sf_vc,sf_vc_se")
(const_string "true")]
(const_string "false")))