@@ -1510,6 +1510,79 @@ vundefined::expand (const function_instance &, tree, rtx target) const
return target;
}
+/* A function implementation for vset functions. */
+char *
+vset::assemble_name (function_instance &instance)
+{
+ machine_mode tmode = instance.get_arg_pattern ().arg_list[0];
+ machine_mode smode = instance.get_arg_pattern ().arg_list[2];
+ if (GET_MODE_INNER (tmode) != GET_MODE_INNER (smode))
+ return nullptr;
+
+ if (tmode == smode)
+ return nullptr;
+
+ if (known_lt (GET_MODE_SIZE (tmode), GET_MODE_SIZE (smode)))
+ return nullptr;
+
+ intrinsic_rename (instance, 0, 2);
+ append_name (instance.get_base_name ());
+ return finish_name ();
+}
+
+void
+vset::get_argument_types (const function_instance &instance,
+ vec<tree> &argument_types) const
+{
+ misc::get_argument_types (instance, argument_types);
+ argument_types.quick_push (size_type_node);
+ argument_types.quick_push (get_dt_t_with_index (instance, 2));
+}
+
+rtx
+vset::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ enum insn_code icode = code_for_vset (instance.get_arg_pattern ().arg_list[0]);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vget functions. */
+char *
+vget::assemble_name (function_instance &instance)
+{
+ machine_mode tmode = instance.get_arg_pattern ().arg_list[0];
+ machine_mode smode = instance.get_arg_pattern ().arg_list[1];
+ if (GET_MODE_INNER (tmode) != GET_MODE_INNER (smode))
+ return nullptr;
+
+ if (tmode == smode)
+ return nullptr;
+
+ if (known_gt (GET_MODE_SIZE (tmode), GET_MODE_SIZE (smode)))
+ return nullptr;
+
+ bool unsigned_p = instance.get_data_type_list ()[0] == DT_unsigned;
+ intrinsic_rename (instance, 0, 1);
+ append_name (instance.get_base_name ());
+ append_name (mode2data_type_str (tmode, unsigned_p, false));
+ return finish_name ();
+}
+
+void
+vget::get_argument_types (const function_instance &instance,
+ vec<tree> &argument_types) const
+{
+ misc::get_argument_types (instance, argument_types);
+ argument_types.quick_push (size_type_node);
+}
+
+rtx
+vget::expand (const function_instance &instance, tree exp, rtx target) const
+{
+ enum insn_code icode = code_for_vget (instance.get_arg_pattern ().arg_list[0]);
+ return expand_builtin_insn (icode, exp, target, instance);
+}
+
/* A function implementation for loadstore functions. */
char *
loadstore::assemble_name (function_instance &instance)
@@ -56,6 +56,12 @@ DEF_RVV_FUNCTION(vlmul_trunc, vlmul_trunc, (2, VITER(VLMULTRUNC, signed), VITER(
DEF_RVV_FUNCTION(vundefined, vundefined, (1, VITER(VI, signed)), PAT_none, PRED_none, OP_none)
DEF_RVV_FUNCTION(vundefined, vundefined, (1, VITER(VI, unsigned)), PAT_none, PRED_none, OP_none)
DEF_RVV_FUNCTION(vundefined, vundefined, (1, VITER(VF, signed)), PAT_none, PRED_none, OP_none)
+DEF_RVV_FUNCTION(vset, vset, (3, VITER(VSETI, signed), VATTR(0, VSETI, signed), VITER(VFULL, signed)), PAT_none, PRED_none, OP_v)
+DEF_RVV_FUNCTION(vset, vset, (3, VITER(VSETI, unsigned), VATTR(0, VSETI, unsigned), VITER(VFULL, unsigned)), PAT_none, PRED_none, OP_v)
+DEF_RVV_FUNCTION(vset, vset, (3, VITER(VSETF, signed), VATTR(0, VSETF, signed), VITER(VFULL, signed)), PAT_none, PRED_none, OP_v)
+DEF_RVV_FUNCTION(vget, vget, (2, VITER(VGETI, signed), VITER(VFULL, signed)), PAT_none, PRED_none, OP_v)
+DEF_RVV_FUNCTION(vget, vget, (2, VITER(VGETI, unsigned), VITER(VFULL, unsigned)), PAT_none, PRED_none, OP_v)
+DEF_RVV_FUNCTION(vget, vget, (2, VITER(VGETF, signed), VITER(VFULL, signed)), PAT_none, PRED_none, OP_v)
/* 7. Vector Loads and Stores. */
DEF_RVV_FUNCTION(vle, vle, (2, VITER(VI, signed), VATTR(0, VSUB, c_ptr)), pat_mask_tail, pred_all, OP_v)
DEF_RVV_FUNCTION(vle, vle, (2, VITER(VI, unsigned), VATTR(0, VSUB, c_uptr)), pat_mask_tail, pred_all, OP_v)
@@ -584,6 +584,34 @@ public:
virtual rtx expand (const function_instance &, tree, rtx) const override;
};
+/* A function_base for vset functions. */
+class vset : public misc
+{
+public:
+ // reuse the constructors inherited from misc
+ using misc::misc;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual void get_argument_types (const function_instance &, vec<tree> &) const override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vget functions. */
+class vget : public misc
+{
+public:
+ // reuse the constructors inherited from misc
+ using misc::misc;
+
+ virtual char * assemble_name (function_instance &) override;
+
+ virtual void get_argument_types (const function_instance &, vec<tree> &) const override;
+
+ virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
/* A function_base for loadstore functions. */
class loadstore : public function_builder
{
new file mode 100644
@@ -0,0 +1,730 @@
+/* { dg-do compile } */
+/* { dg-skip-if "test vector intrinsic" { *-*-* } { "*" } { "-march=rv*v*" } } */
+
+#include <stddef.h>
+#include <riscv_vector.h>
+
+
+vint8m2_t
+test_vset_v_i8m1_i8m2 (vint8m2_t dest, vint8m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint8m4_t
+test_vset_v_i8m1_i8m4 (vint8m4_t dest, vint8m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint8m4_t
+test_vset_v_i8m2_i8m4 (vint8m4_t dest, vint8m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint8m8_t
+test_vset_v_i8m1_i8m8 (vint8m8_t dest, vint8m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint8m8_t
+test_vset_v_i8m2_i8m8 (vint8m8_t dest, vint8m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint8m8_t
+test_vset_v_i8m4_i8m8 (vint8m8_t dest, vint8m4_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint8m1_t
+test_vget_v_i8m2_i8m1 (vint8m2_t src)
+{
+ return vget_i8m1(src, 1);
+}
+
+vint8m1_t
+test_vget_v_i8m4_i8m1 (vint8m4_t src)
+{
+ return vget_i8m1(src, 1);
+}
+
+vint8m1_t
+test_vget_v_i8m8_i8m1 (vint8m8_t src)
+{
+ return vget_i8m1(src, 1);
+}
+
+vint8m2_t
+test_vget_v_i8m4_i8m2 (vint8m4_t src)
+{
+ return vget_i8m2(src, 1);
+}
+
+vint8m2_t
+test_vget_v_i8m8_i8m2 (vint8m8_t src)
+{
+ return vget_i8m2(src, 1);
+}
+
+vint8m4_t
+test_vget_v_i8m8_i8m4 (vint8m8_t src)
+{
+ return vget_i8m4(src, 1);
+}
+
+vint16m2_t
+test_vset_v_i16m1_i16m2 (vint16m2_t dest, vint16m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint16m4_t
+test_vset_v_i16m1_i16m4 (vint16m4_t dest, vint16m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint16m4_t
+test_vset_v_i16m2_i16m4 (vint16m4_t dest, vint16m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint16m8_t
+test_vset_v_i16m1_i16m8 (vint16m8_t dest, vint16m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint16m8_t
+test_vset_v_i16m2_i16m8 (vint16m8_t dest, vint16m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint16m8_t
+test_vset_v_i16m4_i16m8 (vint16m8_t dest, vint16m4_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint16m1_t
+test_vget_v_i16m2_i16m1 (vint16m2_t src)
+{
+ return vget_i16m1(src, 1);
+}
+
+vint16m1_t
+test_vget_v_i16m4_i16m1 (vint16m4_t src)
+{
+ return vget_i16m1(src, 1);
+}
+
+vint16m1_t
+test_vget_v_i16m8_i16m1 (vint16m8_t src)
+{
+ return vget_i16m1(src, 1);
+}
+
+vint16m2_t
+test_vget_v_i16m4_i16m2 (vint16m4_t src)
+{
+ return vget_i16m2(src, 1);
+}
+
+vint16m2_t
+test_vget_v_i16m8_i16m2 (vint16m8_t src)
+{
+ return vget_i16m2(src, 1);
+}
+
+vint16m4_t
+test_vget_v_i16m8_i16m4 (vint16m8_t src)
+{
+ return vget_i16m4(src, 1);
+}
+
+vint32m2_t
+test_vset_v_i32m1_i32m2 (vint32m2_t dest, vint32m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint32m4_t
+test_vset_v_i32m1_i32m4 (vint32m4_t dest, vint32m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint32m4_t
+test_vset_v_i32m2_i32m4 (vint32m4_t dest, vint32m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint32m8_t
+test_vset_v_i32m1_i32m8 (vint32m8_t dest, vint32m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint32m8_t
+test_vset_v_i32m2_i32m8 (vint32m8_t dest, vint32m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint32m8_t
+test_vset_v_i32m4_i32m8 (vint32m8_t dest, vint32m4_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint32m1_t
+test_vget_v_i32m2_i32m1 (vint32m2_t src)
+{
+ return vget_i32m1(src, 1);
+}
+
+vint32m1_t
+test_vget_v_i32m4_i32m1 (vint32m4_t src)
+{
+ return vget_i32m1(src, 1);
+}
+
+vint32m1_t
+test_vget_v_i32m8_i32m1 (vint32m8_t src)
+{
+ return vget_i32m1(src, 1);
+}
+
+vint32m2_t
+test_vget_v_i32m4_i32m2 (vint32m4_t src)
+{
+ return vget_i32m2(src, 1);
+}
+
+vint32m2_t
+test_vget_v_i32m8_i32m2 (vint32m8_t src)
+{
+ return vget_i32m2(src, 1);
+}
+
+vint32m4_t
+test_vget_v_i32m8_i32m4 (vint32m8_t src)
+{
+ return vget_i32m4(src, 1);
+}
+
+vint64m2_t
+test_vset_v_i64m1_i64m2 (vint64m2_t dest, vint64m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint64m4_t
+test_vset_v_i64m1_i64m4 (vint64m4_t dest, vint64m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint64m4_t
+test_vset_v_i64m2_i64m4 (vint64m4_t dest, vint64m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint64m8_t
+test_vset_v_i64m1_i64m8 (vint64m8_t dest, vint64m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint64m8_t
+test_vset_v_i64m2_i64m8 (vint64m8_t dest, vint64m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint64m8_t
+test_vset_v_i64m4_i64m8 (vint64m8_t dest, vint64m4_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint64m1_t
+test_vget_v_i64m2_i64m1 (vint64m2_t src)
+{
+ return vget_i64m1(src, 1);
+}
+
+vint64m1_t
+test_vget_v_i64m4_i64m1 (vint64m4_t src)
+{
+ return vget_i64m1(src, 1);
+}
+
+vint64m1_t
+test_vget_v_i64m8_i64m1 (vint64m8_t src)
+{
+ return vget_i64m1(src, 1);
+}
+
+vint64m2_t
+test_vget_v_i64m4_i64m2 (vint64m4_t src)
+{
+ return vget_i64m2(src, 1);
+}
+
+vint64m2_t
+test_vget_v_i64m8_i64m2 (vint64m8_t src)
+{
+ return vget_i64m2(src, 1);
+}
+
+vint64m4_t
+test_vget_v_i64m8_i64m4 (vint64m8_t src)
+{
+ return vget_i64m4(src, 1);
+}
+
+vuint8m2_t
+test_vset_v_u8m1_u8m2 (vuint8m2_t dest, vuint8m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint8m4_t
+test_vset_v_u8m1_u8m4 (vuint8m4_t dest, vuint8m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint8m4_t
+test_vset_v_u8m2_u8m4 (vuint8m4_t dest, vuint8m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint8m8_t
+test_vset_v_u8m1_u8m8 (vuint8m8_t dest, vuint8m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint8m8_t
+test_vset_v_u8m2_u8m8 (vuint8m8_t dest, vuint8m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint8m8_t
+test_vset_v_u8m4_u8m8 (vuint8m8_t dest, vuint8m4_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint8m1_t
+test_vget_v_u8m2_u8m1 (vuint8m2_t src)
+{
+ return vget_u8m1(src, 1);
+}
+
+vuint8m1_t
+test_vget_v_u8m4_u8m1 (vuint8m4_t src)
+{
+ return vget_u8m1(src, 1);
+}
+
+vuint8m1_t
+test_vget_v_u8m8_u8m1 (vuint8m8_t src)
+{
+ return vget_u8m1(src, 1);
+}
+
+vuint8m2_t
+test_vget_v_u8m4_u8m2 (vuint8m4_t src)
+{
+ return vget_u8m2(src, 1);
+}
+
+vuint8m2_t
+test_vget_v_u8m8_u8m2 (vuint8m8_t src)
+{
+ return vget_u8m2(src, 1);
+}
+
+vuint8m4_t
+test_vget_v_u8m8_u8m4 (vuint8m8_t src)
+{
+ return vget_u8m4(src, 1);
+}
+
+vuint16m2_t
+test_vset_v_u16m1_u16m2 (vuint16m2_t dest, vuint16m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint16m4_t
+test_vset_v_u16m1_u16m4 (vuint16m4_t dest, vuint16m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint16m4_t
+test_vset_v_u16m2_u16m4 (vuint16m4_t dest, vuint16m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint16m8_t
+test_vset_v_u16m1_u16m8 (vuint16m8_t dest, vuint16m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint16m8_t
+test_vset_v_u16m2_u16m8 (vuint16m8_t dest, vuint16m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint16m8_t
+test_vset_v_u16m4_u16m8 (vuint16m8_t dest, vuint16m4_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint16m1_t
+test_vget_v_u16m2_u16m1 (vuint16m2_t src)
+{
+ return vget_u16m1(src, 1);
+}
+
+vuint16m1_t
+test_vget_v_u16m4_u16m1 (vuint16m4_t src)
+{
+ return vget_u16m1(src, 1);
+}
+
+vuint16m1_t
+test_vget_v_u16m8_u16m1 (vuint16m8_t src)
+{
+ return vget_u16m1(src, 1);
+}
+
+vuint16m2_t
+test_vget_v_u16m4_u16m2 (vuint16m4_t src)
+{
+ return vget_u16m2(src, 1);
+}
+
+vuint16m2_t
+test_vget_v_u16m8_u16m2 (vuint16m8_t src)
+{
+ return vget_u16m2(src, 1);
+}
+
+vuint16m4_t
+test_vget_v_u16m8_u16m4 (vuint16m8_t src)
+{
+ return vget_u16m4(src, 1);
+}
+
+vuint32m2_t
+test_vset_v_u32m1_u32m2 (vuint32m2_t dest, vuint32m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint32m4_t
+test_vset_v_u32m1_u32m4 (vuint32m4_t dest, vuint32m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint32m4_t
+test_vset_v_u32m2_u32m4 (vuint32m4_t dest, vuint32m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint32m8_t
+test_vset_v_u32m1_u32m8 (vuint32m8_t dest, vuint32m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint32m8_t
+test_vset_v_u32m2_u32m8 (vuint32m8_t dest, vuint32m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint32m8_t
+test_vset_v_u32m4_u32m8 (vuint32m8_t dest, vuint32m4_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint32m1_t
+test_vget_v_u32m2_u32m1 (vuint32m2_t src)
+{
+ return vget_u32m1(src, 1);
+}
+
+vuint32m1_t
+test_vget_v_u32m4_u32m1 (vuint32m4_t src)
+{
+ return vget_u32m1(src, 1);
+}
+
+vuint32m1_t
+test_vget_v_u32m8_u32m1 (vuint32m8_t src)
+{
+ return vget_u32m1(src, 1);
+}
+
+vuint32m2_t
+test_vget_v_u32m4_u32m2 (vuint32m4_t src)
+{
+ return vget_u32m2(src, 1);
+}
+
+vuint32m2_t
+test_vget_v_u32m8_u32m2 (vuint32m8_t src)
+{
+ return vget_u32m2(src, 1);
+}
+
+vuint32m4_t
+test_vget_v_u32m8_u32m4 (vuint32m8_t src)
+{
+ return vget_u32m4(src, 1);
+}
+
+vuint64m2_t
+test_vset_v_u64m1_u64m2 (vuint64m2_t dest, vuint64m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint64m4_t
+test_vset_v_u64m1_u64m4 (vuint64m4_t dest, vuint64m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint64m4_t
+test_vset_v_u64m2_u64m4 (vuint64m4_t dest, vuint64m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint64m8_t
+test_vset_v_u64m1_u64m8 (vuint64m8_t dest, vuint64m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint64m8_t
+test_vset_v_u64m2_u64m8 (vuint64m8_t dest, vuint64m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint64m8_t
+test_vset_v_u64m4_u64m8 (vuint64m8_t dest, vuint64m4_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint64m1_t
+test_vget_v_u64m2_u64m1 (vuint64m2_t src)
+{
+ return vget_u64m1(src, 1);
+}
+
+vuint64m1_t
+test_vget_v_u64m4_u64m1 (vuint64m4_t src)
+{
+ return vget_u64m1(src, 1);
+}
+
+vuint64m1_t
+test_vget_v_u64m8_u64m1 (vuint64m8_t src)
+{
+ return vget_u64m1(src, 1);
+}
+
+vuint64m2_t
+test_vget_v_u64m4_u64m2 (vuint64m4_t src)
+{
+ return vget_u64m2(src, 1);
+}
+
+vuint64m2_t
+test_vget_v_u64m8_u64m2 (vuint64m8_t src)
+{
+ return vget_u64m2(src, 1);
+}
+
+vuint64m4_t
+test_vget_v_u64m8_u64m4 (vuint64m8_t src)
+{
+ return vget_u64m4(src, 1);
+}
+
+vfloat32m2_t
+test_vset_v_f32m1_f32m2 (vfloat32m2_t dest, vfloat32m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat32m4_t
+test_vset_v_f32m1_f32m4 (vfloat32m4_t dest, vfloat32m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat32m4_t
+test_vset_v_f32m2_f32m4 (vfloat32m4_t dest, vfloat32m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat32m8_t
+test_vset_v_f32m1_f32m8 (vfloat32m8_t dest, vfloat32m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat32m8_t
+test_vset_v_f32m2_f32m8 (vfloat32m8_t dest, vfloat32m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat32m8_t
+test_vset_v_f32m4_f32m8 (vfloat32m8_t dest, vfloat32m4_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat32m1_t
+test_vget_v_f32m2_f32m1 (vfloat32m2_t src)
+{
+ return vget_f32m1(src, 1);
+}
+
+vfloat32m1_t
+test_vget_v_f32m4_f32m1 (vfloat32m4_t src)
+{
+ return vget_f32m1(src, 1);
+}
+
+vfloat32m1_t
+test_vget_v_f32m8_f32m1 (vfloat32m8_t src)
+{
+ return vget_f32m1(src, 1);
+}
+
+vfloat32m2_t
+test_vget_v_f32m4_f32m2 (vfloat32m4_t src)
+{
+ return vget_f32m2(src, 1);
+}
+
+vfloat32m2_t
+test_vget_v_f32m8_f32m2 (vfloat32m8_t src)
+{
+ return vget_f32m2(src, 1);
+}
+
+vfloat32m4_t
+test_vget_v_f32m8_f32m4 (vfloat32m8_t src)
+{
+ return vget_f32m4(src, 1);
+}
+
+vfloat64m2_t
+test_vset_v_f64m1_f64m2 (vfloat64m2_t dest, vfloat64m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat64m4_t
+test_vset_v_f64m1_f64m4 (vfloat64m4_t dest, vfloat64m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat64m4_t
+test_vset_v_f64m2_f64m4 (vfloat64m4_t dest, vfloat64m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat64m8_t
+test_vset_v_f64m1_f64m8 (vfloat64m8_t dest, vfloat64m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat64m8_t
+test_vset_v_f64m2_f64m8 (vfloat64m8_t dest, vfloat64m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat64m8_t
+test_vset_v_f64m4_f64m8 (vfloat64m8_t dest, vfloat64m4_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat64m1_t
+test_vget_v_f64m2_f64m1 (vfloat64m2_t src)
+{
+ return vget_f64m1(src, 1);
+}
+
+vfloat64m1_t
+test_vget_v_f64m4_f64m1 (vfloat64m4_t src)
+{
+ return vget_f64m1(src, 1);
+}
+
+vfloat64m1_t
+test_vget_v_f64m8_f64m1 (vfloat64m8_t src)
+{
+ return vget_f64m1(src, 1);
+}
+
+vfloat64m2_t
+test_vget_v_f64m4_f64m2 (vfloat64m4_t src)
+{
+ return vget_f64m2(src, 1);
+}
+
+vfloat64m2_t
+test_vget_v_f64m8_f64m2 (vfloat64m8_t src)
+{
+ return vget_f64m2(src, 1);
+}
+
+vfloat64m4_t
+test_vget_v_f64m8_f64m4 (vfloat64m8_t src)
+{
+ return vget_f64m4(src, 1);
+}
+/* { dg-final { scan-assembler-times {vmv1r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])} 60 } } */
+/* { dg-final { scan-assembler-times {vmv2r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])} 40 } } */
+/* { dg-final { scan-assembler-times {vmv4r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])} 20 } } */
+
new file mode 100644
@@ -0,0 +1,730 @@
+
+/* { dg-do compile } */
+/* { dg-skip-if "test vector intrinsic" { *-*-* } { "*" } { "-march=rv*v*" } } */
+
+#include <stddef.h>
+#include <riscv_vector.h>
+
+
+vint8m2_t
+test_vset_v_i8m1_i8m2 (vint8m2_t dest, vint8m1_t val)
+{
+ return vset_v_i8m1_i8m2(dest, 1, val);
+}
+
+vint8m4_t
+test_vset_v_i8m1_i8m4 (vint8m4_t dest, vint8m1_t val)
+{
+ return vset_v_i8m1_i8m4(dest, 1, val);
+}
+
+vint8m4_t
+test_vset_v_i8m2_i8m4 (vint8m4_t dest, vint8m2_t val)
+{
+ return vset_v_i8m2_i8m4(dest, 1, val);
+}
+
+vint8m8_t
+test_vset_v_i8m1_i8m8 (vint8m8_t dest, vint8m1_t val)
+{
+ return vset_v_i8m1_i8m8(dest, 1, val);
+}
+
+vint8m8_t
+test_vset_v_i8m2_i8m8 (vint8m8_t dest, vint8m2_t val)
+{
+ return vset_v_i8m2_i8m8(dest, 1, val);
+}
+
+vint8m8_t
+test_vset_v_i8m4_i8m8 (vint8m8_t dest, vint8m4_t val)
+{
+ return vset_v_i8m4_i8m8(dest, 1, val);
+}
+
+vint8m1_t
+test_vget_v_i8m2_i8m1 (vint8m2_t src)
+{
+ return vget_v_i8m2_i8m1(src, 1);
+}
+
+vint8m1_t
+test_vget_v_i8m4_i8m1 (vint8m4_t src)
+{
+ return vget_v_i8m4_i8m1(src, 1);
+}
+
+vint8m1_t
+test_vget_v_i8m8_i8m1 (vint8m8_t src)
+{
+ return vget_v_i8m8_i8m1(src, 1);
+}
+
+vint8m2_t
+test_vget_v_i8m4_i8m2 (vint8m4_t src)
+{
+ return vget_v_i8m4_i8m2(src, 1);
+}
+
+vint8m2_t
+test_vget_v_i8m8_i8m2 (vint8m8_t src)
+{
+ return vget_v_i8m8_i8m2(src, 1);
+}
+
+vint8m4_t
+test_vget_v_i8m8_i8m4 (vint8m8_t src)
+{
+ return vget_v_i8m8_i8m4(src, 1);
+}
+
+vint16m2_t
+test_vset_v_i16m1_i16m2 (vint16m2_t dest, vint16m1_t val)
+{
+ return vset_v_i16m1_i16m2(dest, 1, val);
+}
+
+vint16m4_t
+test_vset_v_i16m1_i16m4 (vint16m4_t dest, vint16m1_t val)
+{
+ return vset_v_i16m1_i16m4(dest, 1, val);
+}
+
+vint16m4_t
+test_vset_v_i16m2_i16m4 (vint16m4_t dest, vint16m2_t val)
+{
+ return vset_v_i16m2_i16m4(dest, 1, val);
+}
+
+vint16m8_t
+test_vset_v_i16m1_i16m8 (vint16m8_t dest, vint16m1_t val)
+{
+ return vset_v_i16m1_i16m8(dest, 1, val);
+}
+
+vint16m8_t
+test_vset_v_i16m2_i16m8 (vint16m8_t dest, vint16m2_t val)
+{
+ return vset_v_i16m2_i16m8(dest, 1, val);
+}
+
+vint16m8_t
+test_vset_v_i16m4_i16m8 (vint16m8_t dest, vint16m4_t val)
+{
+ return vset_v_i16m4_i16m8(dest, 1, val);
+}
+
+vint16m1_t
+test_vget_v_i16m2_i16m1 (vint16m2_t src)
+{
+ return vget_v_i16m2_i16m1(src, 1);
+}
+
+vint16m1_t
+test_vget_v_i16m4_i16m1 (vint16m4_t src)
+{
+ return vget_v_i16m4_i16m1(src, 1);
+}
+
+vint16m1_t
+test_vget_v_i16m8_i16m1 (vint16m8_t src)
+{
+ return vget_v_i16m8_i16m1(src, 1);
+}
+
+vint16m2_t
+test_vget_v_i16m4_i16m2 (vint16m4_t src)
+{
+ return vget_v_i16m4_i16m2(src, 1);
+}
+
+vint16m2_t
+test_vget_v_i16m8_i16m2 (vint16m8_t src)
+{
+ return vget_v_i16m8_i16m2(src, 1);
+}
+
+vint16m4_t
+test_vget_v_i16m8_i16m4 (vint16m8_t src)
+{
+ return vget_v_i16m8_i16m4(src, 1);
+}
+
+vint32m2_t
+test_vset_v_i32m1_i32m2 (vint32m2_t dest, vint32m1_t val)
+{
+ return vset_v_i32m1_i32m2(dest, 1, val);
+}
+
+vint32m4_t
+test_vset_v_i32m1_i32m4 (vint32m4_t dest, vint32m1_t val)
+{
+ return vset_v_i32m1_i32m4(dest, 1, val);
+}
+
+vint32m4_t
+test_vset_v_i32m2_i32m4 (vint32m4_t dest, vint32m2_t val)
+{
+ return vset_v_i32m2_i32m4(dest, 1, val);
+}
+
+vint32m8_t
+test_vset_v_i32m1_i32m8 (vint32m8_t dest, vint32m1_t val)
+{
+ return vset_v_i32m1_i32m8(dest, 1, val);
+}
+
+vint32m8_t
+test_vset_v_i32m2_i32m8 (vint32m8_t dest, vint32m2_t val)
+{
+ return vset_v_i32m2_i32m8(dest, 1, val);
+}
+
+vint32m8_t
+test_vset_v_i32m4_i32m8 (vint32m8_t dest, vint32m4_t val)
+{
+ return vset_v_i32m4_i32m8(dest, 1, val);
+}
+
+vint32m1_t
+test_vget_v_i32m2_i32m1 (vint32m2_t src)
+{
+ return vget_v_i32m2_i32m1(src, 1);
+}
+
+vint32m1_t
+test_vget_v_i32m4_i32m1 (vint32m4_t src)
+{
+ return vget_v_i32m4_i32m1(src, 1);
+}
+
+vint32m1_t
+test_vget_v_i32m8_i32m1 (vint32m8_t src)
+{
+ return vget_v_i32m8_i32m1(src, 1);
+}
+
+vint32m2_t
+test_vget_v_i32m4_i32m2 (vint32m4_t src)
+{
+ return vget_v_i32m4_i32m2(src, 1);
+}
+
+vint32m2_t
+test_vget_v_i32m8_i32m2 (vint32m8_t src)
+{
+ return vget_v_i32m8_i32m2(src, 1);
+}
+
+vint32m4_t
+test_vget_v_i32m8_i32m4 (vint32m8_t src)
+{
+ return vget_v_i32m8_i32m4(src, 1);
+}
+
+vint64m2_t
+test_vset_v_i64m1_i64m2 (vint64m2_t dest, vint64m1_t val)
+{
+ return vset_v_i64m1_i64m2(dest, 1, val);
+}
+
+vint64m4_t
+test_vset_v_i64m1_i64m4 (vint64m4_t dest, vint64m1_t val)
+{
+ return vset_v_i64m1_i64m4(dest, 1, val);
+}
+
+vint64m4_t
+test_vset_v_i64m2_i64m4 (vint64m4_t dest, vint64m2_t val)
+{
+ return vset_v_i64m2_i64m4(dest, 1, val);
+}
+
+vint64m8_t
+test_vset_v_i64m1_i64m8 (vint64m8_t dest, vint64m1_t val)
+{
+ return vset_v_i64m1_i64m8(dest, 1, val);
+}
+
+vint64m8_t
+test_vset_v_i64m2_i64m8 (vint64m8_t dest, vint64m2_t val)
+{
+ return vset_v_i64m2_i64m8(dest, 1, val);
+}
+
+vint64m8_t
+test_vset_v_i64m4_i64m8 (vint64m8_t dest, vint64m4_t val)
+{
+ return vset_v_i64m4_i64m8(dest, 1, val);
+}
+
+vint64m1_t
+test_vget_v_i64m2_i64m1 (vint64m2_t src)
+{
+ return vget_v_i64m2_i64m1(src, 1);
+}
+
+vint64m1_t
+test_vget_v_i64m4_i64m1 (vint64m4_t src)
+{
+ return vget_v_i64m4_i64m1(src, 1);
+}
+
+vint64m1_t
+test_vget_v_i64m8_i64m1 (vint64m8_t src)
+{
+ return vget_v_i64m8_i64m1(src, 1);
+}
+
+vint64m2_t
+test_vget_v_i64m4_i64m2 (vint64m4_t src)
+{
+ return vget_v_i64m4_i64m2(src, 1);
+}
+
+vint64m2_t
+test_vget_v_i64m8_i64m2 (vint64m8_t src)
+{
+ return vget_v_i64m8_i64m2(src, 1);
+}
+
+vint64m4_t
+test_vget_v_i64m8_i64m4 (vint64m8_t src)
+{
+ return vget_v_i64m8_i64m4(src, 1);
+}
+
+vuint8m2_t
+test_vset_v_u8m1_u8m2 (vuint8m2_t dest, vuint8m1_t val)
+{
+ return vset_v_u8m1_u8m2(dest, 1, val);
+}
+
+vuint8m4_t
+test_vset_v_u8m1_u8m4 (vuint8m4_t dest, vuint8m1_t val)
+{
+ return vset_v_u8m1_u8m4(dest, 1, val);
+}
+
+vuint8m4_t
+test_vset_v_u8m2_u8m4 (vuint8m4_t dest, vuint8m2_t val)
+{
+ return vset_v_u8m2_u8m4(dest, 1, val);
+}
+
+vuint8m8_t
+test_vset_v_u8m1_u8m8 (vuint8m8_t dest, vuint8m1_t val)
+{
+ return vset_v_u8m1_u8m8(dest, 1, val);
+}
+
+vuint8m8_t
+test_vset_v_u8m2_u8m8 (vuint8m8_t dest, vuint8m2_t val)
+{
+ return vset_v_u8m2_u8m8(dest, 1, val);
+}
+
+vuint8m8_t
+test_vset_v_u8m4_u8m8 (vuint8m8_t dest, vuint8m4_t val)
+{
+ return vset_v_u8m4_u8m8(dest, 1, val);
+}
+
+vuint8m1_t
+test_vget_v_u8m2_u8m1 (vuint8m2_t src)
+{
+ return vget_v_u8m2_u8m1(src, 1);
+}
+
+vuint8m1_t
+test_vget_v_u8m4_u8m1 (vuint8m4_t src)
+{
+ return vget_v_u8m4_u8m1(src, 1);
+}
+
+vuint8m1_t
+test_vget_v_u8m8_u8m1 (vuint8m8_t src)
+{
+ return vget_v_u8m8_u8m1(src, 1);
+}
+
+vuint8m2_t
+test_vget_v_u8m4_u8m2 (vuint8m4_t src)
+{
+ return vget_v_u8m4_u8m2(src, 1);
+}
+
+vuint8m2_t
+test_vget_v_u8m8_u8m2 (vuint8m8_t src)
+{
+ return vget_v_u8m8_u8m2(src, 1);
+}
+
+vuint8m4_t
+test_vget_v_u8m8_u8m4 (vuint8m8_t src)
+{
+ return vget_v_u8m8_u8m4(src, 1);
+}
+
+vuint16m2_t
+test_vset_v_u16m1_u16m2 (vuint16m2_t dest, vuint16m1_t val)
+{
+ return vset_v_u16m1_u16m2(dest, 1, val);
+}
+
+vuint16m4_t
+test_vset_v_u16m1_u16m4 (vuint16m4_t dest, vuint16m1_t val)
+{
+ return vset_v_u16m1_u16m4(dest, 1, val);
+}
+
+vuint16m4_t
+test_vset_v_u16m2_u16m4 (vuint16m4_t dest, vuint16m2_t val)
+{
+ return vset_v_u16m2_u16m4(dest, 1, val);
+}
+
+vuint16m8_t
+test_vset_v_u16m1_u16m8 (vuint16m8_t dest, vuint16m1_t val)
+{
+ return vset_v_u16m1_u16m8(dest, 1, val);
+}
+
+vuint16m8_t
+test_vset_v_u16m2_u16m8 (vuint16m8_t dest, vuint16m2_t val)
+{
+ return vset_v_u16m2_u16m8(dest, 1, val);
+}
+
+vuint16m8_t
+test_vset_v_u16m4_u16m8 (vuint16m8_t dest, vuint16m4_t val)
+{
+ return vset_v_u16m4_u16m8(dest, 1, val);
+}
+
+vuint16m1_t
+test_vget_v_u16m2_u16m1 (vuint16m2_t src)
+{
+ return vget_v_u16m2_u16m1(src, 1);
+}
+
+vuint16m1_t
+test_vget_v_u16m4_u16m1 (vuint16m4_t src)
+{
+ return vget_v_u16m4_u16m1(src, 1);
+}
+
+vuint16m1_t
+test_vget_v_u16m8_u16m1 (vuint16m8_t src)
+{
+ return vget_v_u16m8_u16m1(src, 1);
+}
+
+vuint16m2_t
+test_vget_v_u16m4_u16m2 (vuint16m4_t src)
+{
+ return vget_v_u16m4_u16m2(src, 1);
+}
+
+vuint16m2_t
+test_vget_v_u16m8_u16m2 (vuint16m8_t src)
+{
+ return vget_v_u16m8_u16m2(src, 1);
+}
+
+vuint16m4_t
+test_vget_v_u16m8_u16m4 (vuint16m8_t src)
+{
+ return vget_v_u16m8_u16m4(src, 1);
+}
+
+vuint32m2_t
+test_vset_v_u32m1_u32m2 (vuint32m2_t dest, vuint32m1_t val)
+{
+ return vset_v_u32m1_u32m2(dest, 1, val);
+}
+
+vuint32m4_t
+test_vset_v_u32m1_u32m4 (vuint32m4_t dest, vuint32m1_t val)
+{
+ return vset_v_u32m1_u32m4(dest, 1, val);
+}
+
+vuint32m4_t
+test_vset_v_u32m2_u32m4 (vuint32m4_t dest, vuint32m2_t val)
+{
+ return vset_v_u32m2_u32m4(dest, 1, val);
+}
+
+vuint32m8_t
+test_vset_v_u32m1_u32m8 (vuint32m8_t dest, vuint32m1_t val)
+{
+ return vset_v_u32m1_u32m8(dest, 1, val);
+}
+
+vuint32m8_t
+test_vset_v_u32m2_u32m8 (vuint32m8_t dest, vuint32m2_t val)
+{
+ return vset_v_u32m2_u32m8(dest, 1, val);
+}
+
+vuint32m8_t
+test_vset_v_u32m4_u32m8 (vuint32m8_t dest, vuint32m4_t val)
+{
+ return vset_v_u32m4_u32m8(dest, 1, val);
+}
+
+vuint32m1_t
+test_vget_v_u32m2_u32m1 (vuint32m2_t src)
+{
+ return vget_v_u32m2_u32m1(src, 1);
+}
+
+vuint32m1_t
+test_vget_v_u32m4_u32m1 (vuint32m4_t src)
+{
+ return vget_v_u32m4_u32m1(src, 1);
+}
+
+vuint32m1_t
+test_vget_v_u32m8_u32m1 (vuint32m8_t src)
+{
+ return vget_v_u32m8_u32m1(src, 1);
+}
+
+vuint32m2_t
+test_vget_v_u32m4_u32m2 (vuint32m4_t src)
+{
+ return vget_v_u32m4_u32m2(src, 1);
+}
+
+vuint32m2_t
+test_vget_v_u32m8_u32m2 (vuint32m8_t src)
+{
+ return vget_v_u32m8_u32m2(src, 1);
+}
+
+vuint32m4_t
+test_vget_v_u32m8_u32m4 (vuint32m8_t src)
+{
+ return vget_v_u32m8_u32m4(src, 1);
+}
+
+vuint64m2_t
+test_vset_v_u64m1_u64m2 (vuint64m2_t dest, vuint64m1_t val)
+{
+ return vset_v_u64m1_u64m2(dest, 1, val);
+}
+
+vuint64m4_t
+test_vset_v_u64m1_u64m4 (vuint64m4_t dest, vuint64m1_t val)
+{
+ return vset_v_u64m1_u64m4(dest, 1, val);
+}
+
+vuint64m4_t
+test_vset_v_u64m2_u64m4 (vuint64m4_t dest, vuint64m2_t val)
+{
+ return vset_v_u64m2_u64m4(dest, 1, val);
+}
+
+vuint64m8_t
+test_vset_v_u64m1_u64m8 (vuint64m8_t dest, vuint64m1_t val)
+{
+ return vset_v_u64m1_u64m8(dest, 1, val);
+}
+
+vuint64m8_t
+test_vset_v_u64m2_u64m8 (vuint64m8_t dest, vuint64m2_t val)
+{
+ return vset_v_u64m2_u64m8(dest, 1, val);
+}
+
+vuint64m8_t
+test_vset_v_u64m4_u64m8 (vuint64m8_t dest, vuint64m4_t val)
+{
+ return vset_v_u64m4_u64m8(dest, 1, val);
+}
+
+vuint64m1_t
+test_vget_v_u64m2_u64m1 (vuint64m2_t src)
+{
+ return vget_v_u64m2_u64m1(src, 1);
+}
+
+vuint64m1_t
+test_vget_v_u64m4_u64m1 (vuint64m4_t src)
+{
+ return vget_v_u64m4_u64m1(src, 1);
+}
+
+vuint64m1_t
+test_vget_v_u64m8_u64m1 (vuint64m8_t src)
+{
+ return vget_v_u64m8_u64m1(src, 1);
+}
+
+vuint64m2_t
+test_vget_v_u64m4_u64m2 (vuint64m4_t src)
+{
+ return vget_v_u64m4_u64m2(src, 1);
+}
+
+vuint64m2_t
+test_vget_v_u64m8_u64m2 (vuint64m8_t src)
+{
+ return vget_v_u64m8_u64m2(src, 1);
+}
+
+vuint64m4_t
+test_vget_v_u64m8_u64m4 (vuint64m8_t src)
+{
+ return vget_v_u64m8_u64m4(src, 1);
+}
+
+vfloat32m2_t
+test_vset_v_f32m1_f32m2 (vfloat32m2_t dest, vfloat32m1_t val)
+{
+ return vset_v_f32m1_f32m2(dest, 1, val);
+}
+
+vfloat32m4_t
+test_vset_v_f32m1_f32m4 (vfloat32m4_t dest, vfloat32m1_t val)
+{
+ return vset_v_f32m1_f32m4(dest, 1, val);
+}
+
+vfloat32m4_t
+test_vset_v_f32m2_f32m4 (vfloat32m4_t dest, vfloat32m2_t val)
+{
+ return vset_v_f32m2_f32m4(dest, 1, val);
+}
+
+vfloat32m8_t
+test_vset_v_f32m1_f32m8 (vfloat32m8_t dest, vfloat32m1_t val)
+{
+ return vset_v_f32m1_f32m8(dest, 1, val);
+}
+
+vfloat32m8_t
+test_vset_v_f32m2_f32m8 (vfloat32m8_t dest, vfloat32m2_t val)
+{
+ return vset_v_f32m2_f32m8(dest, 1, val);
+}
+
+vfloat32m8_t
+test_vset_v_f32m4_f32m8 (vfloat32m8_t dest, vfloat32m4_t val)
+{
+ return vset_v_f32m4_f32m8(dest, 1, val);
+}
+
+vfloat32m1_t
+test_vget_v_f32m2_f32m1 (vfloat32m2_t src)
+{
+ return vget_v_f32m2_f32m1(src, 1);
+}
+
+vfloat32m1_t
+test_vget_v_f32m4_f32m1 (vfloat32m4_t src)
+{
+ return vget_v_f32m4_f32m1(src, 1);
+}
+
+vfloat32m1_t
+test_vget_v_f32m8_f32m1 (vfloat32m8_t src)
+{
+ return vget_v_f32m8_f32m1(src, 1);
+}
+
+vfloat32m2_t
+test_vget_v_f32m4_f32m2 (vfloat32m4_t src)
+{
+ return vget_v_f32m4_f32m2(src, 1);
+}
+
+vfloat32m2_t
+test_vget_v_f32m8_f32m2 (vfloat32m8_t src)
+{
+ return vget_v_f32m8_f32m2(src, 1);
+}
+
+vfloat32m4_t
+test_vget_v_f32m8_f32m4 (vfloat32m8_t src)
+{
+ return vget_v_f32m8_f32m4(src, 1);
+}
+
+vfloat64m2_t
+test_vset_v_f64m1_f64m2 (vfloat64m2_t dest, vfloat64m1_t val)
+{
+ return vset_v_f64m1_f64m2(dest, 1, val);
+}
+
+vfloat64m4_t
+test_vset_v_f64m1_f64m4 (vfloat64m4_t dest, vfloat64m1_t val)
+{
+ return vset_v_f64m1_f64m4(dest, 1, val);
+}
+
+vfloat64m4_t
+test_vset_v_f64m2_f64m4 (vfloat64m4_t dest, vfloat64m2_t val)
+{
+ return vset_v_f64m2_f64m4(dest, 1, val);
+}
+
+vfloat64m8_t
+test_vset_v_f64m1_f64m8 (vfloat64m8_t dest, vfloat64m1_t val)
+{
+ return vset_v_f64m1_f64m8(dest, 1, val);
+}
+
+vfloat64m8_t
+test_vset_v_f64m2_f64m8 (vfloat64m8_t dest, vfloat64m2_t val)
+{
+ return vset_v_f64m2_f64m8(dest, 1, val);
+}
+
+vfloat64m8_t
+test_vset_v_f64m4_f64m8 (vfloat64m8_t dest, vfloat64m4_t val)
+{
+ return vset_v_f64m4_f64m8(dest, 1, val);
+}
+
+vfloat64m1_t
+test_vget_v_f64m2_f64m1 (vfloat64m2_t src)
+{
+ return vget_v_f64m2_f64m1(src, 1);
+}
+
+vfloat64m1_t
+test_vget_v_f64m4_f64m1 (vfloat64m4_t src)
+{
+ return vget_v_f64m4_f64m1(src, 1);
+}
+
+vfloat64m1_t
+test_vget_v_f64m8_f64m1 (vfloat64m8_t src)
+{
+ return vget_v_f64m8_f64m1(src, 1);
+}
+
+vfloat64m2_t
+test_vget_v_f64m4_f64m2 (vfloat64m4_t src)
+{
+ return vget_v_f64m4_f64m2(src, 1);
+}
+
+vfloat64m2_t
+test_vget_v_f64m8_f64m2 (vfloat64m8_t src)
+{
+ return vget_v_f64m8_f64m2(src, 1);
+}
+
+vfloat64m4_t
+test_vget_v_f64m8_f64m4 (vfloat64m8_t src)
+{
+ return vget_v_f64m8_f64m4(src, 1);
+}
+/* { dg-final { scan-assembler-times {vmv1r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])} 60 } } */
+/* { dg-final { scan-assembler-times {vmv2r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])} 40 } } */
+/* { dg-final { scan-assembler-times {vmv4r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])} 20 } } */