From patchwork Tue May 31 08:50:03 2022
X-Patchwork-Submitter: 钟居哲 (Juzhe Zhong)
X-Patchwork-Id: 54554
From: juzhe.zhong@rivai.ai
To: gcc-patches@gcc.gnu.org
Cc: kito.cheng@gmail.com, juzhe.zhong@rivai.ai
Subject: [PATCH 12/21] Add set get intrinsic support
Date: Tue, 31 May 2022 16:50:03 +0800
Message-Id: <20220531085012.269719-13-juzhe.zhong@rivai.ai>
In-Reply-To: <20220531085012.269719-1-juzhe.zhong@rivai.ai>
References: <20220531085012.269719-1-juzhe.zhong@rivai.ai>

From: zhongjuzhe <juzhe.zhong@rivai.ai>

gcc/ChangeLog:

	* config/riscv/riscv-vector-builtins-functions.cc (vset::assemble_name): New function.
	(vset::get_argument_types): New function.
	(vset::expand): New function.
	(vget::assemble_name): New function.
	(vget::get_argument_types): New function.
	(vget::expand): New function.
	* config/riscv/riscv-vector-builtins-functions.def (vset): New macro definition.
	(vget): New macro definition.
	* config/riscv/riscv-vector-builtins-functions.h (class vset): New class.
	(class vget): New class.

gcc/testsuite/ChangeLog:

	* g++.target/riscv/rvv/set-get.C: New test.
	* gcc.target/riscv/rvv/intrinsic/set-get.c: New test.
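For reference, a minimal usage sketch based on the new tests added below
(nothing here goes beyond what the tests themselves exercise; it assumes a
compiler that provides these intrinsics and an rv*v* -march string, as the
dg-skip-if directives in the tests require): vset inserts a smaller-LMUL
value into the part of a larger-LMUL vector selected by the index argument,
and vget extracts such a part.  The wrapper names below are made up for
illustration only; the intrinsic calls appear verbatim in the new tests.

    #include <riscv_vector.h>

    vint8m4_t
    insert_second_half (vint8m4_t dest, vint8m2_t val)
    {
      /* Overwrite the second m2-sized part of the m4 destination.  */
      return vset_v_i8m2_i8m4 (dest, 1, val);
    }

    vint8m2_t
    extract_second_half (vint8m4_t src)
    {
      /* Read the second m2-sized part back out.  */
      return vget_v_i8m4_i8m2 (src, 1);
    }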
---
 .../riscv/riscv-vector-builtins-functions.cc  |  73 ++
 .../riscv/riscv-vector-builtins-functions.def |   6 +
 .../riscv/riscv-vector-builtins-functions.h   |  28 +
 gcc/testsuite/g++.target/riscv/rvv/set-get.C  | 730 ++++++++++++++++++
 .../gcc.target/riscv/rvv/intrinsic/set-get.c  | 730 ++++++++++++++++++
 5 files changed, 1567 insertions(+)
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/set-get.C
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/intrinsic/set-get.c

diff --git a/gcc/config/riscv/riscv-vector-builtins-functions.cc b/gcc/config/riscv/riscv-vector-builtins-functions.cc
index fa39eedcd86..9d2895c3d3e 100644
--- a/gcc/config/riscv/riscv-vector-builtins-functions.cc
+++ b/gcc/config/riscv/riscv-vector-builtins-functions.cc
@@ -1510,6 +1510,79 @@ vundefined::expand (const function_instance &, tree, rtx target) const
   return target;
 }
 
+/* A function implementation for vset functions.  */
+char *
+vset::assemble_name (function_instance &instance)
+{
+  machine_mode tmode = instance.get_arg_pattern ().arg_list[0];
+  machine_mode smode = instance.get_arg_pattern ().arg_list[2];
+  if (GET_MODE_INNER (tmode) != GET_MODE_INNER (smode))
+    return nullptr;
+
+  if (tmode == smode)
+    return nullptr;
+
+  if (known_lt (GET_MODE_SIZE (tmode), GET_MODE_SIZE (smode)))
+    return nullptr;
+
+  intrinsic_rename (instance, 0, 2);
+  append_name (instance.get_base_name ());
+  return finish_name ();
+}
+
+void
+vset::get_argument_types (const function_instance &instance,
+			  vec<tree> &argument_types) const
+{
+  misc::get_argument_types (instance, argument_types);
+  argument_types.quick_push (size_type_node);
+  argument_types.quick_push (get_dt_t_with_index (instance, 2));
+}
+
+rtx
+vset::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  enum insn_code icode = code_for_vset (instance.get_arg_pattern ().arg_list[0]);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vget functions.  */
+char *
+vget::assemble_name (function_instance &instance)
+{
+  machine_mode tmode = instance.get_arg_pattern ().arg_list[0];
+  machine_mode smode = instance.get_arg_pattern ().arg_list[1];
+  if (GET_MODE_INNER (tmode) != GET_MODE_INNER (smode))
+    return nullptr;
+
+  if (tmode == smode)
+    return nullptr;
+
+  if (known_gt (GET_MODE_SIZE (tmode), GET_MODE_SIZE (smode)))
+    return nullptr;
+
+  bool unsigned_p = instance.get_data_type_list ()[0] == DT_unsigned;
+  intrinsic_rename (instance, 0, 1);
+  append_name (instance.get_base_name ());
+  append_name (mode2data_type_str (tmode, unsigned_p, false));
+  return finish_name ();
+}
+
+void
+vget::get_argument_types (const function_instance &instance,
+			  vec<tree> &argument_types) const
+{
+  misc::get_argument_types (instance, argument_types);
+  argument_types.quick_push (size_type_node);
+}
+
+rtx
+vget::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  enum insn_code icode = code_for_vget (instance.get_arg_pattern ().arg_list[0]);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
 /* A function implementation for loadstore functions.  */
 char *
 loadstore::assemble_name (function_instance &instance)
diff --git a/gcc/config/riscv/riscv-vector-builtins-functions.def b/gcc/config/riscv/riscv-vector-builtins-functions.def
index deb32ccd031..739ae60fff5 100644
--- a/gcc/config/riscv/riscv-vector-builtins-functions.def
+++ b/gcc/config/riscv/riscv-vector-builtins-functions.def
@@ -56,6 +56,12 @@ DEF_RVV_FUNCTION(vlmul_trunc, vlmul_trunc, (2, VITER(VLMULTRUNC, signed), VITER(
 DEF_RVV_FUNCTION(vundefined, vundefined, (1, VITER(VI, signed)), PAT_none, PRED_none, OP_none)
 DEF_RVV_FUNCTION(vundefined, vundefined, (1, VITER(VI, unsigned)), PAT_none, PRED_none, OP_none)
 DEF_RVV_FUNCTION(vundefined, vundefined, (1, VITER(VF, signed)), PAT_none, PRED_none, OP_none)
+DEF_RVV_FUNCTION(vset, vset, (3, VITER(VSETI, signed), VATTR(0, VSETI, signed), VITER(VFULL, signed)), PAT_none, PRED_none, OP_v)
+DEF_RVV_FUNCTION(vset, vset, (3, VITER(VSETI, unsigned), VATTR(0, VSETI, unsigned), VITER(VFULL, unsigned)), PAT_none, PRED_none, OP_v)
+DEF_RVV_FUNCTION(vset, vset, (3, VITER(VSETF, signed), VATTR(0, VSETF, signed), VITER(VFULL, signed)), PAT_none, PRED_none, OP_v)
+DEF_RVV_FUNCTION(vget, vget, (2, VITER(VGETI, signed), VITER(VFULL, signed)), PAT_none, PRED_none, OP_v)
+DEF_RVV_FUNCTION(vget, vget, (2, VITER(VGETI, unsigned), VITER(VFULL, unsigned)), PAT_none, PRED_none, OP_v)
+DEF_RVV_FUNCTION(vget, vget, (2, VITER(VGETF, signed), VITER(VFULL, signed)), PAT_none, PRED_none, OP_v)
 /* 7. Vector Loads and Stores. */
 DEF_RVV_FUNCTION(vle, vle, (2, VITER(VI, signed), VATTR(0, VSUB, c_ptr)), pat_mask_tail, pred_all, OP_v)
 DEF_RVV_FUNCTION(vle, vle, (2, VITER(VI, unsigned), VATTR(0, VSUB, c_uptr)), pat_mask_tail, pred_all, OP_v)
diff --git a/gcc/config/riscv/riscv-vector-builtins-functions.h b/gcc/config/riscv/riscv-vector-builtins-functions.h
index c9e1b2a34ca..90063005024 100644
--- a/gcc/config/riscv/riscv-vector-builtins-functions.h
+++ b/gcc/config/riscv/riscv-vector-builtins-functions.h
@@ -584,6 +584,34 @@ public:
   virtual rtx expand (const function_instance &, tree, rtx) const override;
 };
 
+/* A function_base for vset functions.  */
+class vset : public misc
+{
+public:
+  // use the same constructor as misc
+  using misc::misc;
+
+  virtual char * assemble_name (function_instance &) override;
+
+  virtual void get_argument_types (const function_instance &, vec<tree> &) const override;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vget functions.  */
+class vget : public misc
+{
+public:
+  // use the same constructor as misc
+  using misc::misc;
+
+  virtual char * assemble_name (function_instance &) override;
+
+  virtual void get_argument_types (const function_instance &, vec<tree> &) const override;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
 /* A function_base for loadstore functions.
*/ class loadstore : public function_builder { diff --git a/gcc/testsuite/g++.target/riscv/rvv/set-get.C b/gcc/testsuite/g++.target/riscv/rvv/set-get.C new file mode 100644 index 00000000000..7c8deb96a39 --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/set-get.C @@ -0,0 +1,730 @@ +/* { dg-do compile } */ +/* { dg-skip-if "test vector intrinsic" { *-*-* } { "*" } { "-march=rv*v*" } } */ + +#include +#include + + +vint8m2_t +test_vset_v_i8m1_i8m2 (vint8m2_t dest, vint8m1_t val) +{ + return vset(dest, 1, val); +} + +vint8m4_t +test_vset_v_i8m1_i8m4 (vint8m4_t dest, vint8m1_t val) +{ + return vset(dest, 1, val); +} + +vint8m4_t +test_vset_v_i8m2_i8m4 (vint8m4_t dest, vint8m2_t val) +{ + return vset(dest, 1, val); +} + +vint8m8_t +test_vset_v_i8m1_i8m8 (vint8m8_t dest, vint8m1_t val) +{ + return vset(dest, 1, val); +} + +vint8m8_t +test_vset_v_i8m2_i8m8 (vint8m8_t dest, vint8m2_t val) +{ + return vset(dest, 1, val); +} + +vint8m8_t +test_vset_v_i8m4_i8m8 (vint8m8_t dest, vint8m4_t val) +{ + return vset(dest, 1, val); +} + +vint8m1_t +test_vget_v_i8m2_i8m1 (vint8m2_t src) +{ + return vget_i8m1(src, 1); +} + +vint8m1_t +test_vget_v_i8m4_i8m1 (vint8m4_t src) +{ + return vget_i8m1(src, 1); +} + +vint8m1_t +test_vget_v_i8m8_i8m1 (vint8m8_t src) +{ + return vget_i8m1(src, 1); +} + +vint8m2_t +test_vget_v_i8m4_i8m2 (vint8m4_t src) +{ + return vget_i8m2(src, 1); +} + +vint8m2_t +test_vget_v_i8m8_i8m2 (vint8m8_t src) +{ + return vget_i8m2(src, 1); +} + +vint8m4_t +test_vget_v_i8m8_i8m4 (vint8m8_t src) +{ + return vget_i8m4(src, 1); +} + +vint16m2_t +test_vset_v_i16m1_i16m2 (vint16m2_t dest, vint16m1_t val) +{ + return vset(dest, 1, val); +} + +vint16m4_t +test_vset_v_i16m1_i16m4 (vint16m4_t dest, vint16m1_t val) +{ + return vset(dest, 1, val); +} + +vint16m4_t +test_vset_v_i16m2_i16m4 (vint16m4_t dest, vint16m2_t val) +{ + return vset(dest, 1, val); +} + +vint16m8_t +test_vset_v_i16m1_i16m8 (vint16m8_t dest, vint16m1_t val) +{ + return vset(dest, 1, val); +} + +vint16m8_t +test_vset_v_i16m2_i16m8 (vint16m8_t dest, vint16m2_t val) +{ + return vset(dest, 1, val); +} + +vint16m8_t +test_vset_v_i16m4_i16m8 (vint16m8_t dest, vint16m4_t val) +{ + return vset(dest, 1, val); +} + +vint16m1_t +test_vget_v_i16m2_i16m1 (vint16m2_t src) +{ + return vget_i16m1(src, 1); +} + +vint16m1_t +test_vget_v_i16m4_i16m1 (vint16m4_t src) +{ + return vget_i16m1(src, 1); +} + +vint16m1_t +test_vget_v_i16m8_i16m1 (vint16m8_t src) +{ + return vget_i16m1(src, 1); +} + +vint16m2_t +test_vget_v_i16m4_i16m2 (vint16m4_t src) +{ + return vget_i16m2(src, 1); +} + +vint16m2_t +test_vget_v_i16m8_i16m2 (vint16m8_t src) +{ + return vget_i16m2(src, 1); +} + +vint16m4_t +test_vget_v_i16m8_i16m4 (vint16m8_t src) +{ + return vget_i16m4(src, 1); +} + +vint32m2_t +test_vset_v_i32m1_i32m2 (vint32m2_t dest, vint32m1_t val) +{ + return vset(dest, 1, val); +} + +vint32m4_t +test_vset_v_i32m1_i32m4 (vint32m4_t dest, vint32m1_t val) +{ + return vset(dest, 1, val); +} + +vint32m4_t +test_vset_v_i32m2_i32m4 (vint32m4_t dest, vint32m2_t val) +{ + return vset(dest, 1, val); +} + +vint32m8_t +test_vset_v_i32m1_i32m8 (vint32m8_t dest, vint32m1_t val) +{ + return vset(dest, 1, val); +} + +vint32m8_t +test_vset_v_i32m2_i32m8 (vint32m8_t dest, vint32m2_t val) +{ + return vset(dest, 1, val); +} + +vint32m8_t +test_vset_v_i32m4_i32m8 (vint32m8_t dest, vint32m4_t val) +{ + return vset(dest, 1, val); +} + +vint32m1_t +test_vget_v_i32m2_i32m1 (vint32m2_t src) +{ + return vget_i32m1(src, 1); +} + +vint32m1_t +test_vget_v_i32m4_i32m1 (vint32m4_t src) +{ + return 
vget_i32m1(src, 1); +} + +vint32m1_t +test_vget_v_i32m8_i32m1 (vint32m8_t src) +{ + return vget_i32m1(src, 1); +} + +vint32m2_t +test_vget_v_i32m4_i32m2 (vint32m4_t src) +{ + return vget_i32m2(src, 1); +} + +vint32m2_t +test_vget_v_i32m8_i32m2 (vint32m8_t src) +{ + return vget_i32m2(src, 1); +} + +vint32m4_t +test_vget_v_i32m8_i32m4 (vint32m8_t src) +{ + return vget_i32m4(src, 1); +} + +vint64m2_t +test_vset_v_i64m1_i64m2 (vint64m2_t dest, vint64m1_t val) +{ + return vset(dest, 1, val); +} + +vint64m4_t +test_vset_v_i64m1_i64m4 (vint64m4_t dest, vint64m1_t val) +{ + return vset(dest, 1, val); +} + +vint64m4_t +test_vset_v_i64m2_i64m4 (vint64m4_t dest, vint64m2_t val) +{ + return vset(dest, 1, val); +} + +vint64m8_t +test_vset_v_i64m1_i64m8 (vint64m8_t dest, vint64m1_t val) +{ + return vset(dest, 1, val); +} + +vint64m8_t +test_vset_v_i64m2_i64m8 (vint64m8_t dest, vint64m2_t val) +{ + return vset(dest, 1, val); +} + +vint64m8_t +test_vset_v_i64m4_i64m8 (vint64m8_t dest, vint64m4_t val) +{ + return vset(dest, 1, val); +} + +vint64m1_t +test_vget_v_i64m2_i64m1 (vint64m2_t src) +{ + return vget_i64m1(src, 1); +} + +vint64m1_t +test_vget_v_i64m4_i64m1 (vint64m4_t src) +{ + return vget_i64m1(src, 1); +} + +vint64m1_t +test_vget_v_i64m8_i64m1 (vint64m8_t src) +{ + return vget_i64m1(src, 1); +} + +vint64m2_t +test_vget_v_i64m4_i64m2 (vint64m4_t src) +{ + return vget_i64m2(src, 1); +} + +vint64m2_t +test_vget_v_i64m8_i64m2 (vint64m8_t src) +{ + return vget_i64m2(src, 1); +} + +vint64m4_t +test_vget_v_i64m8_i64m4 (vint64m8_t src) +{ + return vget_i64m4(src, 1); +} + +vuint8m2_t +test_vset_v_u8m1_u8m2 (vuint8m2_t dest, vuint8m1_t val) +{ + return vset(dest, 1, val); +} + +vuint8m4_t +test_vset_v_u8m1_u8m4 (vuint8m4_t dest, vuint8m1_t val) +{ + return vset(dest, 1, val); +} + +vuint8m4_t +test_vset_v_u8m2_u8m4 (vuint8m4_t dest, vuint8m2_t val) +{ + return vset(dest, 1, val); +} + +vuint8m8_t +test_vset_v_u8m1_u8m8 (vuint8m8_t dest, vuint8m1_t val) +{ + return vset(dest, 1, val); +} + +vuint8m8_t +test_vset_v_u8m2_u8m8 (vuint8m8_t dest, vuint8m2_t val) +{ + return vset(dest, 1, val); +} + +vuint8m8_t +test_vset_v_u8m4_u8m8 (vuint8m8_t dest, vuint8m4_t val) +{ + return vset(dest, 1, val); +} + +vuint8m1_t +test_vget_v_u8m2_u8m1 (vuint8m2_t src) +{ + return vget_u8m1(src, 1); +} + +vuint8m1_t +test_vget_v_u8m4_u8m1 (vuint8m4_t src) +{ + return vget_u8m1(src, 1); +} + +vuint8m1_t +test_vget_v_u8m8_u8m1 (vuint8m8_t src) +{ + return vget_u8m1(src, 1); +} + +vuint8m2_t +test_vget_v_u8m4_u8m2 (vuint8m4_t src) +{ + return vget_u8m2(src, 1); +} + +vuint8m2_t +test_vget_v_u8m8_u8m2 (vuint8m8_t src) +{ + return vget_u8m2(src, 1); +} + +vuint8m4_t +test_vget_v_u8m8_u8m4 (vuint8m8_t src) +{ + return vget_u8m4(src, 1); +} + +vuint16m2_t +test_vset_v_u16m1_u16m2 (vuint16m2_t dest, vuint16m1_t val) +{ + return vset(dest, 1, val); +} + +vuint16m4_t +test_vset_v_u16m1_u16m4 (vuint16m4_t dest, vuint16m1_t val) +{ + return vset(dest, 1, val); +} + +vuint16m4_t +test_vset_v_u16m2_u16m4 (vuint16m4_t dest, vuint16m2_t val) +{ + return vset(dest, 1, val); +} + +vuint16m8_t +test_vset_v_u16m1_u16m8 (vuint16m8_t dest, vuint16m1_t val) +{ + return vset(dest, 1, val); +} + +vuint16m8_t +test_vset_v_u16m2_u16m8 (vuint16m8_t dest, vuint16m2_t val) +{ + return vset(dest, 1, val); +} + +vuint16m8_t +test_vset_v_u16m4_u16m8 (vuint16m8_t dest, vuint16m4_t val) +{ + return vset(dest, 1, val); +} + +vuint16m1_t +test_vget_v_u16m2_u16m1 (vuint16m2_t src) +{ + return vget_u16m1(src, 1); +} + +vuint16m1_t +test_vget_v_u16m4_u16m1 
(vuint16m4_t src) +{ + return vget_u16m1(src, 1); +} + +vuint16m1_t +test_vget_v_u16m8_u16m1 (vuint16m8_t src) +{ + return vget_u16m1(src, 1); +} + +vuint16m2_t +test_vget_v_u16m4_u16m2 (vuint16m4_t src) +{ + return vget_u16m2(src, 1); +} + +vuint16m2_t +test_vget_v_u16m8_u16m2 (vuint16m8_t src) +{ + return vget_u16m2(src, 1); +} + +vuint16m4_t +test_vget_v_u16m8_u16m4 (vuint16m8_t src) +{ + return vget_u16m4(src, 1); +} + +vuint32m2_t +test_vset_v_u32m1_u32m2 (vuint32m2_t dest, vuint32m1_t val) +{ + return vset(dest, 1, val); +} + +vuint32m4_t +test_vset_v_u32m1_u32m4 (vuint32m4_t dest, vuint32m1_t val) +{ + return vset(dest, 1, val); +} + +vuint32m4_t +test_vset_v_u32m2_u32m4 (vuint32m4_t dest, vuint32m2_t val) +{ + return vset(dest, 1, val); +} + +vuint32m8_t +test_vset_v_u32m1_u32m8 (vuint32m8_t dest, vuint32m1_t val) +{ + return vset(dest, 1, val); +} + +vuint32m8_t +test_vset_v_u32m2_u32m8 (vuint32m8_t dest, vuint32m2_t val) +{ + return vset(dest, 1, val); +} + +vuint32m8_t +test_vset_v_u32m4_u32m8 (vuint32m8_t dest, vuint32m4_t val) +{ + return vset(dest, 1, val); +} + +vuint32m1_t +test_vget_v_u32m2_u32m1 (vuint32m2_t src) +{ + return vget_u32m1(src, 1); +} + +vuint32m1_t +test_vget_v_u32m4_u32m1 (vuint32m4_t src) +{ + return vget_u32m1(src, 1); +} + +vuint32m1_t +test_vget_v_u32m8_u32m1 (vuint32m8_t src) +{ + return vget_u32m1(src, 1); +} + +vuint32m2_t +test_vget_v_u32m4_u32m2 (vuint32m4_t src) +{ + return vget_u32m2(src, 1); +} + +vuint32m2_t +test_vget_v_u32m8_u32m2 (vuint32m8_t src) +{ + return vget_u32m2(src, 1); +} + +vuint32m4_t +test_vget_v_u32m8_u32m4 (vuint32m8_t src) +{ + return vget_u32m4(src, 1); +} + +vuint64m2_t +test_vset_v_u64m1_u64m2 (vuint64m2_t dest, vuint64m1_t val) +{ + return vset(dest, 1, val); +} + +vuint64m4_t +test_vset_v_u64m1_u64m4 (vuint64m4_t dest, vuint64m1_t val) +{ + return vset(dest, 1, val); +} + +vuint64m4_t +test_vset_v_u64m2_u64m4 (vuint64m4_t dest, vuint64m2_t val) +{ + return vset(dest, 1, val); +} + +vuint64m8_t +test_vset_v_u64m1_u64m8 (vuint64m8_t dest, vuint64m1_t val) +{ + return vset(dest, 1, val); +} + +vuint64m8_t +test_vset_v_u64m2_u64m8 (vuint64m8_t dest, vuint64m2_t val) +{ + return vset(dest, 1, val); +} + +vuint64m8_t +test_vset_v_u64m4_u64m8 (vuint64m8_t dest, vuint64m4_t val) +{ + return vset(dest, 1, val); +} + +vuint64m1_t +test_vget_v_u64m2_u64m1 (vuint64m2_t src) +{ + return vget_u64m1(src, 1); +} + +vuint64m1_t +test_vget_v_u64m4_u64m1 (vuint64m4_t src) +{ + return vget_u64m1(src, 1); +} + +vuint64m1_t +test_vget_v_u64m8_u64m1 (vuint64m8_t src) +{ + return vget_u64m1(src, 1); +} + +vuint64m2_t +test_vget_v_u64m4_u64m2 (vuint64m4_t src) +{ + return vget_u64m2(src, 1); +} + +vuint64m2_t +test_vget_v_u64m8_u64m2 (vuint64m8_t src) +{ + return vget_u64m2(src, 1); +} + +vuint64m4_t +test_vget_v_u64m8_u64m4 (vuint64m8_t src) +{ + return vget_u64m4(src, 1); +} + +vfloat32m2_t +test_vset_v_f32m1_f32m2 (vfloat32m2_t dest, vfloat32m1_t val) +{ + return vset(dest, 1, val); +} + +vfloat32m4_t +test_vset_v_f32m1_f32m4 (vfloat32m4_t dest, vfloat32m1_t val) +{ + return vset(dest, 1, val); +} + +vfloat32m4_t +test_vset_v_f32m2_f32m4 (vfloat32m4_t dest, vfloat32m2_t val) +{ + return vset(dest, 1, val); +} + +vfloat32m8_t +test_vset_v_f32m1_f32m8 (vfloat32m8_t dest, vfloat32m1_t val) +{ + return vset(dest, 1, val); +} + +vfloat32m8_t +test_vset_v_f32m2_f32m8 (vfloat32m8_t dest, vfloat32m2_t val) +{ + return vset(dest, 1, val); +} + +vfloat32m8_t +test_vset_v_f32m4_f32m8 (vfloat32m8_t dest, vfloat32m4_t val) +{ + return vset(dest, 1, val); 
+} + +vfloat32m1_t +test_vget_v_f32m2_f32m1 (vfloat32m2_t src) +{ + return vget_f32m1(src, 1); +} + +vfloat32m1_t +test_vget_v_f32m4_f32m1 (vfloat32m4_t src) +{ + return vget_f32m1(src, 1); +} + +vfloat32m1_t +test_vget_v_f32m8_f32m1 (vfloat32m8_t src) +{ + return vget_f32m1(src, 1); +} + +vfloat32m2_t +test_vget_v_f32m4_f32m2 (vfloat32m4_t src) +{ + return vget_f32m2(src, 1); +} + +vfloat32m2_t +test_vget_v_f32m8_f32m2 (vfloat32m8_t src) +{ + return vget_f32m2(src, 1); +} + +vfloat32m4_t +test_vget_v_f32m8_f32m4 (vfloat32m8_t src) +{ + return vget_f32m4(src, 1); +} + +vfloat64m2_t +test_vset_v_f64m1_f64m2 (vfloat64m2_t dest, vfloat64m1_t val) +{ + return vset(dest, 1, val); +} + +vfloat64m4_t +test_vset_v_f64m1_f64m4 (vfloat64m4_t dest, vfloat64m1_t val) +{ + return vset(dest, 1, val); +} + +vfloat64m4_t +test_vset_v_f64m2_f64m4 (vfloat64m4_t dest, vfloat64m2_t val) +{ + return vset(dest, 1, val); +} + +vfloat64m8_t +test_vset_v_f64m1_f64m8 (vfloat64m8_t dest, vfloat64m1_t val) +{ + return vset(dest, 1, val); +} + +vfloat64m8_t +test_vset_v_f64m2_f64m8 (vfloat64m8_t dest, vfloat64m2_t val) +{ + return vset(dest, 1, val); +} + +vfloat64m8_t +test_vset_v_f64m4_f64m8 (vfloat64m8_t dest, vfloat64m4_t val) +{ + return vset(dest, 1, val); +} + +vfloat64m1_t +test_vget_v_f64m2_f64m1 (vfloat64m2_t src) +{ + return vget_f64m1(src, 1); +} + +vfloat64m1_t +test_vget_v_f64m4_f64m1 (vfloat64m4_t src) +{ + return vget_f64m1(src, 1); +} + +vfloat64m1_t +test_vget_v_f64m8_f64m1 (vfloat64m8_t src) +{ + return vget_f64m1(src, 1); +} + +vfloat64m2_t +test_vget_v_f64m4_f64m2 (vfloat64m4_t src) +{ + return vget_f64m2(src, 1); +} + +vfloat64m2_t +test_vget_v_f64m8_f64m2 (vfloat64m8_t src) +{ + return vget_f64m2(src, 1); +} + +vfloat64m4_t +test_vget_v_f64m8_f64m4 (vfloat64m8_t src) +{ + return vget_f64m4(src, 1); +} +/* { dg-final { scan-assembler-times {vmv1r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])} 60 } } */ +/* { dg-final { scan-assembler-times {vmv2r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])} 40 } } */ +/* { dg-final { scan-assembler-times {vmv4r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])} 20 } } */ + diff --git a/gcc/testsuite/gcc.target/riscv/rvv/intrinsic/set-get.c b/gcc/testsuite/gcc.target/riscv/rvv/intrinsic/set-get.c new file mode 100644 index 00000000000..33d5a129aae --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/intrinsic/set-get.c @@ -0,0 +1,730 @@ + +/* { dg-do compile } */ +/* { dg-skip-if "test vector intrinsic" { *-*-* } { "*" } { "-march=rv*v*" } } */ + +#include +#include + + +vint8m2_t +test_vset_v_i8m1_i8m2 (vint8m2_t dest, vint8m1_t val) +{ + return vset_v_i8m1_i8m2(dest, 1, val); +} + +vint8m4_t +test_vset_v_i8m1_i8m4 (vint8m4_t dest, vint8m1_t val) +{ + return vset_v_i8m1_i8m4(dest, 1, val); +} + +vint8m4_t +test_vset_v_i8m2_i8m4 (vint8m4_t dest, vint8m2_t val) +{ + return vset_v_i8m2_i8m4(dest, 1, val); +} + +vint8m8_t +test_vset_v_i8m1_i8m8 (vint8m8_t dest, vint8m1_t val) +{ + return vset_v_i8m1_i8m8(dest, 1, val); +} + +vint8m8_t +test_vset_v_i8m2_i8m8 (vint8m8_t dest, vint8m2_t val) +{ + return vset_v_i8m2_i8m8(dest, 1, val); +} + +vint8m8_t +test_vset_v_i8m4_i8m8 (vint8m8_t dest, vint8m4_t val) +{ + return vset_v_i8m4_i8m8(dest, 1, val); +} + +vint8m1_t +test_vget_v_i8m2_i8m1 (vint8m2_t src) +{ + return vget_v_i8m2_i8m1(src, 1); +} + +vint8m1_t +test_vget_v_i8m4_i8m1 (vint8m4_t src) +{ + return vget_v_i8m4_i8m1(src, 1); +} + +vint8m1_t +test_vget_v_i8m8_i8m1 (vint8m8_t src) +{ + return 
vget_v_i8m8_i8m1(src, 1); +} + +vint8m2_t +test_vget_v_i8m4_i8m2 (vint8m4_t src) +{ + return vget_v_i8m4_i8m2(src, 1); +} + +vint8m2_t +test_vget_v_i8m8_i8m2 (vint8m8_t src) +{ + return vget_v_i8m8_i8m2(src, 1); +} + +vint8m4_t +test_vget_v_i8m8_i8m4 (vint8m8_t src) +{ + return vget_v_i8m8_i8m4(src, 1); +} + +vint16m2_t +test_vset_v_i16m1_i16m2 (vint16m2_t dest, vint16m1_t val) +{ + return vset_v_i16m1_i16m2(dest, 1, val); +} + +vint16m4_t +test_vset_v_i16m1_i16m4 (vint16m4_t dest, vint16m1_t val) +{ + return vset_v_i16m1_i16m4(dest, 1, val); +} + +vint16m4_t +test_vset_v_i16m2_i16m4 (vint16m4_t dest, vint16m2_t val) +{ + return vset_v_i16m2_i16m4(dest, 1, val); +} + +vint16m8_t +test_vset_v_i16m1_i16m8 (vint16m8_t dest, vint16m1_t val) +{ + return vset_v_i16m1_i16m8(dest, 1, val); +} + +vint16m8_t +test_vset_v_i16m2_i16m8 (vint16m8_t dest, vint16m2_t val) +{ + return vset_v_i16m2_i16m8(dest, 1, val); +} + +vint16m8_t +test_vset_v_i16m4_i16m8 (vint16m8_t dest, vint16m4_t val) +{ + return vset_v_i16m4_i16m8(dest, 1, val); +} + +vint16m1_t +test_vget_v_i16m2_i16m1 (vint16m2_t src) +{ + return vget_v_i16m2_i16m1(src, 1); +} + +vint16m1_t +test_vget_v_i16m4_i16m1 (vint16m4_t src) +{ + return vget_v_i16m4_i16m1(src, 1); +} + +vint16m1_t +test_vget_v_i16m8_i16m1 (vint16m8_t src) +{ + return vget_v_i16m8_i16m1(src, 1); +} + +vint16m2_t +test_vget_v_i16m4_i16m2 (vint16m4_t src) +{ + return vget_v_i16m4_i16m2(src, 1); +} + +vint16m2_t +test_vget_v_i16m8_i16m2 (vint16m8_t src) +{ + return vget_v_i16m8_i16m2(src, 1); +} + +vint16m4_t +test_vget_v_i16m8_i16m4 (vint16m8_t src) +{ + return vget_v_i16m8_i16m4(src, 1); +} + +vint32m2_t +test_vset_v_i32m1_i32m2 (vint32m2_t dest, vint32m1_t val) +{ + return vset_v_i32m1_i32m2(dest, 1, val); +} + +vint32m4_t +test_vset_v_i32m1_i32m4 (vint32m4_t dest, vint32m1_t val) +{ + return vset_v_i32m1_i32m4(dest, 1, val); +} + +vint32m4_t +test_vset_v_i32m2_i32m4 (vint32m4_t dest, vint32m2_t val) +{ + return vset_v_i32m2_i32m4(dest, 1, val); +} + +vint32m8_t +test_vset_v_i32m1_i32m8 (vint32m8_t dest, vint32m1_t val) +{ + return vset_v_i32m1_i32m8(dest, 1, val); +} + +vint32m8_t +test_vset_v_i32m2_i32m8 (vint32m8_t dest, vint32m2_t val) +{ + return vset_v_i32m2_i32m8(dest, 1, val); +} + +vint32m8_t +test_vset_v_i32m4_i32m8 (vint32m8_t dest, vint32m4_t val) +{ + return vset_v_i32m4_i32m8(dest, 1, val); +} + +vint32m1_t +test_vget_v_i32m2_i32m1 (vint32m2_t src) +{ + return vget_v_i32m2_i32m1(src, 1); +} + +vint32m1_t +test_vget_v_i32m4_i32m1 (vint32m4_t src) +{ + return vget_v_i32m4_i32m1(src, 1); +} + +vint32m1_t +test_vget_v_i32m8_i32m1 (vint32m8_t src) +{ + return vget_v_i32m8_i32m1(src, 1); +} + +vint32m2_t +test_vget_v_i32m4_i32m2 (vint32m4_t src) +{ + return vget_v_i32m4_i32m2(src, 1); +} + +vint32m2_t +test_vget_v_i32m8_i32m2 (vint32m8_t src) +{ + return vget_v_i32m8_i32m2(src, 1); +} + +vint32m4_t +test_vget_v_i32m8_i32m4 (vint32m8_t src) +{ + return vget_v_i32m8_i32m4(src, 1); +} + +vint64m2_t +test_vset_v_i64m1_i64m2 (vint64m2_t dest, vint64m1_t val) +{ + return vset_v_i64m1_i64m2(dest, 1, val); +} + +vint64m4_t +test_vset_v_i64m1_i64m4 (vint64m4_t dest, vint64m1_t val) +{ + return vset_v_i64m1_i64m4(dest, 1, val); +} + +vint64m4_t +test_vset_v_i64m2_i64m4 (vint64m4_t dest, vint64m2_t val) +{ + return vset_v_i64m2_i64m4(dest, 1, val); +} + +vint64m8_t +test_vset_v_i64m1_i64m8 (vint64m8_t dest, vint64m1_t val) +{ + return vset_v_i64m1_i64m8(dest, 1, val); +} + +vint64m8_t +test_vset_v_i64m2_i64m8 (vint64m8_t dest, vint64m2_t val) +{ + return 
vset_v_i64m2_i64m8(dest, 1, val); +} + +vint64m8_t +test_vset_v_i64m4_i64m8 (vint64m8_t dest, vint64m4_t val) +{ + return vset_v_i64m4_i64m8(dest, 1, val); +} + +vint64m1_t +test_vget_v_i64m2_i64m1 (vint64m2_t src) +{ + return vget_v_i64m2_i64m1(src, 1); +} + +vint64m1_t +test_vget_v_i64m4_i64m1 (vint64m4_t src) +{ + return vget_v_i64m4_i64m1(src, 1); +} + +vint64m1_t +test_vget_v_i64m8_i64m1 (vint64m8_t src) +{ + return vget_v_i64m8_i64m1(src, 1); +} + +vint64m2_t +test_vget_v_i64m4_i64m2 (vint64m4_t src) +{ + return vget_v_i64m4_i64m2(src, 1); +} + +vint64m2_t +test_vget_v_i64m8_i64m2 (vint64m8_t src) +{ + return vget_v_i64m8_i64m2(src, 1); +} + +vint64m4_t +test_vget_v_i64m8_i64m4 (vint64m8_t src) +{ + return vget_v_i64m8_i64m4(src, 1); +} + +vuint8m2_t +test_vset_v_u8m1_u8m2 (vuint8m2_t dest, vuint8m1_t val) +{ + return vset_v_u8m1_u8m2(dest, 1, val); +} + +vuint8m4_t +test_vset_v_u8m1_u8m4 (vuint8m4_t dest, vuint8m1_t val) +{ + return vset_v_u8m1_u8m4(dest, 1, val); +} + +vuint8m4_t +test_vset_v_u8m2_u8m4 (vuint8m4_t dest, vuint8m2_t val) +{ + return vset_v_u8m2_u8m4(dest, 1, val); +} + +vuint8m8_t +test_vset_v_u8m1_u8m8 (vuint8m8_t dest, vuint8m1_t val) +{ + return vset_v_u8m1_u8m8(dest, 1, val); +} + +vuint8m8_t +test_vset_v_u8m2_u8m8 (vuint8m8_t dest, vuint8m2_t val) +{ + return vset_v_u8m2_u8m8(dest, 1, val); +} + +vuint8m8_t +test_vset_v_u8m4_u8m8 (vuint8m8_t dest, vuint8m4_t val) +{ + return vset_v_u8m4_u8m8(dest, 1, val); +} + +vuint8m1_t +test_vget_v_u8m2_u8m1 (vuint8m2_t src) +{ + return vget_v_u8m2_u8m1(src, 1); +} + +vuint8m1_t +test_vget_v_u8m4_u8m1 (vuint8m4_t src) +{ + return vget_v_u8m4_u8m1(src, 1); +} + +vuint8m1_t +test_vget_v_u8m8_u8m1 (vuint8m8_t src) +{ + return vget_v_u8m8_u8m1(src, 1); +} + +vuint8m2_t +test_vget_v_u8m4_u8m2 (vuint8m4_t src) +{ + return vget_v_u8m4_u8m2(src, 1); +} + +vuint8m2_t +test_vget_v_u8m8_u8m2 (vuint8m8_t src) +{ + return vget_v_u8m8_u8m2(src, 1); +} + +vuint8m4_t +test_vget_v_u8m8_u8m4 (vuint8m8_t src) +{ + return vget_v_u8m8_u8m4(src, 1); +} + +vuint16m2_t +test_vset_v_u16m1_u16m2 (vuint16m2_t dest, vuint16m1_t val) +{ + return vset_v_u16m1_u16m2(dest, 1, val); +} + +vuint16m4_t +test_vset_v_u16m1_u16m4 (vuint16m4_t dest, vuint16m1_t val) +{ + return vset_v_u16m1_u16m4(dest, 1, val); +} + +vuint16m4_t +test_vset_v_u16m2_u16m4 (vuint16m4_t dest, vuint16m2_t val) +{ + return vset_v_u16m2_u16m4(dest, 1, val); +} + +vuint16m8_t +test_vset_v_u16m1_u16m8 (vuint16m8_t dest, vuint16m1_t val) +{ + return vset_v_u16m1_u16m8(dest, 1, val); +} + +vuint16m8_t +test_vset_v_u16m2_u16m8 (vuint16m8_t dest, vuint16m2_t val) +{ + return vset_v_u16m2_u16m8(dest, 1, val); +} + +vuint16m8_t +test_vset_v_u16m4_u16m8 (vuint16m8_t dest, vuint16m4_t val) +{ + return vset_v_u16m4_u16m8(dest, 1, val); +} + +vuint16m1_t +test_vget_v_u16m2_u16m1 (vuint16m2_t src) +{ + return vget_v_u16m2_u16m1(src, 1); +} + +vuint16m1_t +test_vget_v_u16m4_u16m1 (vuint16m4_t src) +{ + return vget_v_u16m4_u16m1(src, 1); +} + +vuint16m1_t +test_vget_v_u16m8_u16m1 (vuint16m8_t src) +{ + return vget_v_u16m8_u16m1(src, 1); +} + +vuint16m2_t +test_vget_v_u16m4_u16m2 (vuint16m4_t src) +{ + return vget_v_u16m4_u16m2(src, 1); +} + +vuint16m2_t +test_vget_v_u16m8_u16m2 (vuint16m8_t src) +{ + return vget_v_u16m8_u16m2(src, 1); +} + +vuint16m4_t +test_vget_v_u16m8_u16m4 (vuint16m8_t src) +{ + return vget_v_u16m8_u16m4(src, 1); +} + +vuint32m2_t +test_vset_v_u32m1_u32m2 (vuint32m2_t dest, vuint32m1_t val) +{ + return vset_v_u32m1_u32m2(dest, 1, val); +} + +vuint32m4_t +test_vset_v_u32m1_u32m4 
(vuint32m4_t dest, vuint32m1_t val) +{ + return vset_v_u32m1_u32m4(dest, 1, val); +} + +vuint32m4_t +test_vset_v_u32m2_u32m4 (vuint32m4_t dest, vuint32m2_t val) +{ + return vset_v_u32m2_u32m4(dest, 1, val); +} + +vuint32m8_t +test_vset_v_u32m1_u32m8 (vuint32m8_t dest, vuint32m1_t val) +{ + return vset_v_u32m1_u32m8(dest, 1, val); +} + +vuint32m8_t +test_vset_v_u32m2_u32m8 (vuint32m8_t dest, vuint32m2_t val) +{ + return vset_v_u32m2_u32m8(dest, 1, val); +} + +vuint32m8_t +test_vset_v_u32m4_u32m8 (vuint32m8_t dest, vuint32m4_t val) +{ + return vset_v_u32m4_u32m8(dest, 1, val); +} + +vuint32m1_t +test_vget_v_u32m2_u32m1 (vuint32m2_t src) +{ + return vget_v_u32m2_u32m1(src, 1); +} + +vuint32m1_t +test_vget_v_u32m4_u32m1 (vuint32m4_t src) +{ + return vget_v_u32m4_u32m1(src, 1); +} + +vuint32m1_t +test_vget_v_u32m8_u32m1 (vuint32m8_t src) +{ + return vget_v_u32m8_u32m1(src, 1); +} + +vuint32m2_t +test_vget_v_u32m4_u32m2 (vuint32m4_t src) +{ + return vget_v_u32m4_u32m2(src, 1); +} + +vuint32m2_t +test_vget_v_u32m8_u32m2 (vuint32m8_t src) +{ + return vget_v_u32m8_u32m2(src, 1); +} + +vuint32m4_t +test_vget_v_u32m8_u32m4 (vuint32m8_t src) +{ + return vget_v_u32m8_u32m4(src, 1); +} + +vuint64m2_t +test_vset_v_u64m1_u64m2 (vuint64m2_t dest, vuint64m1_t val) +{ + return vset_v_u64m1_u64m2(dest, 1, val); +} + +vuint64m4_t +test_vset_v_u64m1_u64m4 (vuint64m4_t dest, vuint64m1_t val) +{ + return vset_v_u64m1_u64m4(dest, 1, val); +} + +vuint64m4_t +test_vset_v_u64m2_u64m4 (vuint64m4_t dest, vuint64m2_t val) +{ + return vset_v_u64m2_u64m4(dest, 1, val); +} + +vuint64m8_t +test_vset_v_u64m1_u64m8 (vuint64m8_t dest, vuint64m1_t val) +{ + return vset_v_u64m1_u64m8(dest, 1, val); +} + +vuint64m8_t +test_vset_v_u64m2_u64m8 (vuint64m8_t dest, vuint64m2_t val) +{ + return vset_v_u64m2_u64m8(dest, 1, val); +} + +vuint64m8_t +test_vset_v_u64m4_u64m8 (vuint64m8_t dest, vuint64m4_t val) +{ + return vset_v_u64m4_u64m8(dest, 1, val); +} + +vuint64m1_t +test_vget_v_u64m2_u64m1 (vuint64m2_t src) +{ + return vget_v_u64m2_u64m1(src, 1); +} + +vuint64m1_t +test_vget_v_u64m4_u64m1 (vuint64m4_t src) +{ + return vget_v_u64m4_u64m1(src, 1); +} + +vuint64m1_t +test_vget_v_u64m8_u64m1 (vuint64m8_t src) +{ + return vget_v_u64m8_u64m1(src, 1); +} + +vuint64m2_t +test_vget_v_u64m4_u64m2 (vuint64m4_t src) +{ + return vget_v_u64m4_u64m2(src, 1); +} + +vuint64m2_t +test_vget_v_u64m8_u64m2 (vuint64m8_t src) +{ + return vget_v_u64m8_u64m2(src, 1); +} + +vuint64m4_t +test_vget_v_u64m8_u64m4 (vuint64m8_t src) +{ + return vget_v_u64m8_u64m4(src, 1); +} + +vfloat32m2_t +test_vset_v_f32m1_f32m2 (vfloat32m2_t dest, vfloat32m1_t val) +{ + return vset_v_f32m1_f32m2(dest, 1, val); +} + +vfloat32m4_t +test_vset_v_f32m1_f32m4 (vfloat32m4_t dest, vfloat32m1_t val) +{ + return vset_v_f32m1_f32m4(dest, 1, val); +} + +vfloat32m4_t +test_vset_v_f32m2_f32m4 (vfloat32m4_t dest, vfloat32m2_t val) +{ + return vset_v_f32m2_f32m4(dest, 1, val); +} + +vfloat32m8_t +test_vset_v_f32m1_f32m8 (vfloat32m8_t dest, vfloat32m1_t val) +{ + return vset_v_f32m1_f32m8(dest, 1, val); +} + +vfloat32m8_t +test_vset_v_f32m2_f32m8 (vfloat32m8_t dest, vfloat32m2_t val) +{ + return vset_v_f32m2_f32m8(dest, 1, val); +} + +vfloat32m8_t +test_vset_v_f32m4_f32m8 (vfloat32m8_t dest, vfloat32m4_t val) +{ + return vset_v_f32m4_f32m8(dest, 1, val); +} + +vfloat32m1_t +test_vget_v_f32m2_f32m1 (vfloat32m2_t src) +{ + return vget_v_f32m2_f32m1(src, 1); +} + +vfloat32m1_t +test_vget_v_f32m4_f32m1 (vfloat32m4_t src) +{ + return vget_v_f32m4_f32m1(src, 1); +} + +vfloat32m1_t 
+test_vget_v_f32m8_f32m1 (vfloat32m8_t src) +{ + return vget_v_f32m8_f32m1(src, 1); +} + +vfloat32m2_t +test_vget_v_f32m4_f32m2 (vfloat32m4_t src) +{ + return vget_v_f32m4_f32m2(src, 1); +} + +vfloat32m2_t +test_vget_v_f32m8_f32m2 (vfloat32m8_t src) +{ + return vget_v_f32m8_f32m2(src, 1); +} + +vfloat32m4_t +test_vget_v_f32m8_f32m4 (vfloat32m8_t src) +{ + return vget_v_f32m8_f32m4(src, 1); +} + +vfloat64m2_t +test_vset_v_f64m1_f64m2 (vfloat64m2_t dest, vfloat64m1_t val) +{ + return vset_v_f64m1_f64m2(dest, 1, val); +} + +vfloat64m4_t +test_vset_v_f64m1_f64m4 (vfloat64m4_t dest, vfloat64m1_t val) +{ + return vset_v_f64m1_f64m4(dest, 1, val); +} + +vfloat64m4_t +test_vset_v_f64m2_f64m4 (vfloat64m4_t dest, vfloat64m2_t val) +{ + return vset_v_f64m2_f64m4(dest, 1, val); +} + +vfloat64m8_t +test_vset_v_f64m1_f64m8 (vfloat64m8_t dest, vfloat64m1_t val) +{ + return vset_v_f64m1_f64m8(dest, 1, val); +} + +vfloat64m8_t +test_vset_v_f64m2_f64m8 (vfloat64m8_t dest, vfloat64m2_t val) +{ + return vset_v_f64m2_f64m8(dest, 1, val); +} + +vfloat64m8_t +test_vset_v_f64m4_f64m8 (vfloat64m8_t dest, vfloat64m4_t val) +{ + return vset_v_f64m4_f64m8(dest, 1, val); +} + +vfloat64m1_t +test_vget_v_f64m2_f64m1 (vfloat64m2_t src) +{ + return vget_v_f64m2_f64m1(src, 1); +} + +vfloat64m1_t +test_vget_v_f64m4_f64m1 (vfloat64m4_t src) +{ + return vget_v_f64m4_f64m1(src, 1); +} + +vfloat64m1_t +test_vget_v_f64m8_f64m1 (vfloat64m8_t src) +{ + return vget_v_f64m8_f64m1(src, 1); +} + +vfloat64m2_t +test_vget_v_f64m4_f64m2 (vfloat64m4_t src) +{ + return vget_v_f64m4_f64m2(src, 1); +} + +vfloat64m2_t +test_vget_v_f64m8_f64m2 (vfloat64m8_t src) +{ + return vget_v_f64m8_f64m2(src, 1); +} + +vfloat64m4_t +test_vget_v_f64m8_f64m4 (vfloat64m8_t src) +{ + return vget_v_f64m8_f64m4(src, 1); +} +/* { dg-final { scan-assembler-times {vmv1r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])} 60 } } */ +/* { dg-final { scan-assembler-times {vmv2r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])} 40 } } */ +/* { dg-final { scan-assembler-times {vmv4r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])} 20 } } */
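
A note on the dg-final counts shared by both tests (this is my reading of
the intended code generation, not captured compiler output): a vset or vget
of an m1-sized part is expected to lower to one whole-register move
vmv1r.v, an m2-sized part to one vmv2r.v, and an m4-sized part to one
vmv4r.v.  Each of the ten element type/signedness combinations
(i8/i16/i32/i64, u8/u16/u32/u64, f32/f64) contributes six m1-part, four
m2-part and two m4-part operations, which gives the required 60, 40 and 20
matches.  For example (the wrapper name is made up; vD/vS stand for
whatever registers end up allocated):

    vint8m2_t
    get_m2_part (vint8m8_t src)
    {
      return vget_v_i8m8_i8m2 (src, 1);  /* expected: a single vmv2r.v vD,vS */
    }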