RISC-V: Add vmul.vx C++ API testcase

Message ID 20230203075316.208999-1-juzhe.zhong@rivai.ai
State Committed
Commit 8c971d59a7c16247954a90335b50bb5b2cc88d34
Series RISC-V: Add vmul.vx C++ API testcase

Commit Message

Ju-Zhe Zhong (钟居哲) Feb. 3, 2023, 7:53 a.m. UTC
  From: Ju-Zhe Zhong <juzhe.zhong@rivai.ai>
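
This patch adds tests for the C++ overloaded __riscv_vmul_mu intrinsic
(vector-scalar multiply under the mask-undisturbed policy), covering every
signed and unsigned integer SEW/LMUL combination from vint8mf8_t through
vuint64m8_t on both rv32 and rv64.  A minimal sketch of the pattern each
test file repeats (the function name here is illustrative, not taken from
the patch):

#include "riscv_vector.h"

/* With -O3 -fno-schedule-insns -fno-schedule-insns2, the overload is
   expected to lower to a vsetvli/vmul.vx pair along the lines of:
       vsetvli  zero,a5,e8,mf8,ta,mu
       vmul.vx  v8,v9,a0,v0.t  */
vint8mf8_t
test (vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, int8_t op2, size_t vl)
{
  return __riscv_vmul_mu (mask, merge, op1, op2, vl);
}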

gcc/testsuite/ChangeLog:

        * g++.target/riscv/rvv/base/vmul_vx_mu_rv32-1.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_mu_rv32-2.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_mu_rv32-3.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_mu_rv64-1.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_mu_rv64-2.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_mu_rv64-3.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_rv32-1.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_rv32-2.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_rv32-3.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_rv64-1.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_rv64-2.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_rv64-3.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_tu_rv32-1.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_tu_rv32-2.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_tu_rv32-3.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_tu_rv64-1.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_tu_rv64-2.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_tu_rv64-3.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_tum_rv32-1.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_tum_rv32-2.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_tum_rv32-3.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_tum_rv64-1.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_tum_rv64-2.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_tum_rv64-3.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-1.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-2.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-3.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-1.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-2.C: New test.
        * g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-3.C: New test.

---
 .../riscv/rvv/base/vmul_vx_mu_rv32-1.C        | 289 +++++++++
 .../riscv/rvv/base/vmul_vx_mu_rv32-2.C        | 289 +++++++++
 .../riscv/rvv/base/vmul_vx_mu_rv32-3.C        | 289 +++++++++
 .../riscv/rvv/base/vmul_vx_mu_rv64-1.C        | 292 +++++++++
 .../riscv/rvv/base/vmul_vx_mu_rv64-2.C        | 292 +++++++++
 .../riscv/rvv/base/vmul_vx_mu_rv64-3.C        | 292 +++++++++
 .../riscv/rvv/base/vmul_vx_rv32-1.C           | 572 +++++++++++++++++
 .../riscv/rvv/base/vmul_vx_rv32-2.C           | 572 +++++++++++++++++
 .../riscv/rvv/base/vmul_vx_rv32-3.C           | 572 +++++++++++++++++
 .../riscv/rvv/base/vmul_vx_rv64-1.C           | 578 ++++++++++++++++++
 .../riscv/rvv/base/vmul_vx_rv64-2.C           | 578 ++++++++++++++++++
 .../riscv/rvv/base/vmul_vx_rv64-3.C           | 578 ++++++++++++++++++
 .../riscv/rvv/base/vmul_vx_tu_rv32-1.C        | 289 +++++++++
 .../riscv/rvv/base/vmul_vx_tu_rv32-2.C        | 289 +++++++++
 .../riscv/rvv/base/vmul_vx_tu_rv32-3.C        | 289 +++++++++
 .../riscv/rvv/base/vmul_vx_tu_rv64-1.C        | 292 +++++++++
 .../riscv/rvv/base/vmul_vx_tu_rv64-2.C        | 292 +++++++++
 .../riscv/rvv/base/vmul_vx_tu_rv64-3.C        | 292 +++++++++
 .../riscv/rvv/base/vmul_vx_tum_rv32-1.C       | 289 +++++++++
 .../riscv/rvv/base/vmul_vx_tum_rv32-2.C       | 289 +++++++++
 .../riscv/rvv/base/vmul_vx_tum_rv32-3.C       | 289 +++++++++
 .../riscv/rvv/base/vmul_vx_tum_rv64-1.C       | 292 +++++++++
 .../riscv/rvv/base/vmul_vx_tum_rv64-2.C       | 292 +++++++++
 .../riscv/rvv/base/vmul_vx_tum_rv64-3.C       | 292 +++++++++
 .../riscv/rvv/base/vmul_vx_tumu_rv32-1.C      | 289 +++++++++
 .../riscv/rvv/base/vmul_vx_tumu_rv32-2.C      | 289 +++++++++
 .../riscv/rvv/base/vmul_vx_tumu_rv32-3.C      | 289 +++++++++
 .../riscv/rvv/base/vmul_vx_tumu_rv64-1.C      | 292 +++++++++
 .../riscv/rvv/base/vmul_vx_tumu_rv64-2.C      | 292 +++++++++
 .../riscv/rvv/base/vmul_vx_tumu_rv64-3.C      | 292 +++++++++
 30 files changed, 10422 insertions(+)
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-1.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-2.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-3.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-1.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-2.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-3.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-1.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-2.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-3.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-1.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-2.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-3.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-1.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-2.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-3.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-1.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-2.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-3.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-1.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-2.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-3.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-1.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-2.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-3.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-1.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-2.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-3.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-1.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-2.C
 create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-3.C
  

Patch
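
An editorial note on how the variants below differ, summarizing the diff
that follows: the rv32/rv64 split changes the -march/-mabi options and the
expected codegen for the 64-bit-element tests (on rv32 an int64_t scalar
does not fit a GPR, so those eight tests are expected to splat it and use
vmul.vv, which is why the rv32 files scan for vmul.vv 8 times while the
rv64 files scan for e64 vmul.vx).  The -1/-2/-3 suffixes vary the AVL:
-1 passes the runtime vl through, -2 hard-codes 31, which fits the 5-bit
immediate of vsetivli, and -3 hard-codes 32, which does not, so a
register-form vsetvli is expected.  A hedged sketch of that distinction
(illustrative function names, riscv_vector.h included as above):

/* 31 fits vsetivli's 5-bit immediate: vsetivli zero,31,e8,mf8,ta,mu  */
vint8mf8_t
test_avl31 (vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, int8_t op2)
{
  return __riscv_vmul_mu (mask, merge, op1, op2, 31);
}

/* 32 needs a register, something like: li a5,32 then
   vsetvli zero,a5,e8,mf8,ta,mu  */
vint8mf8_t
test_avl32 (vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, int8_t op2)
{
  return __riscv_vmul_mu (mask, merge, op1, op2, 32);
}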

diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-1.C
new file mode 100644
index 00000000000..323f55eb38c
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-1.C
@@ -0,0 +1,289 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-2.C
new file mode 100644
index 00000000000..6f264d9563c
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-2.C
@@ -0,0 +1,289 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-3.C
new file mode 100644
index 00000000000..9e3a93cb763
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-3.C
@@ -0,0 +1,289 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-1.C
new file mode 100644
index 00000000000..77e5224d69b
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-1.C
@@ -0,0 +1,292 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-2.C
new file mode 100644
index 00000000000..4104fe933c1
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-2.C
@@ -0,0 +1,292 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
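
Note on the AVL in the "-2" variants: they hard-code the AVL to 31, the largest value that fits vsetivli's 5-bit unsigned immediate, which is why the directives above expect vsetivli rather than vsetvli. A minimal standalone sketch of the pattern being exercised (the function name is illustrative, not from the patch):

#include "riscv_vector.h"

vint8m1_t mul_avl31 (vbool8_t mask, vint8m1_t merge, vint8m1_t op1, int8_t op2)
{
    /* AVL 31 fits the uimm5 field, so the expected sequence is
       "vsetivli zero,31,e8,m1,t[au],mu" followed by vmul.vx.  */
    return __riscv_vmul_mu (mask, merge, op1, op2, 31);
}
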
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-3.C
new file mode 100644
index 00000000000..ad699433228
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-3.C
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
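
Note on the AVL in the "-3" variants: they pass 32 instead, one past the vsetivli immediate range (0-31), so the constant has to be materialized in a scalar register and the directives above expect vsetvli with a register AVL. A sketch under the same caveats as above:

#include "riscv_vector.h"

vint8m1_t mul_avl32 (vbool8_t mask, vint8m1_t merge, vint8m1_t op1, int8_t op2)
{
    /* 32 no longer fits uimm5; expect something like
       "li a5,32" then "vsetvli zero,a5,e8,m1,t[au],mu".  */
    return __riscv_vmul_mu (mask, merge, op1, op2, 32);
}
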
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-1.C
new file mode 100644
index 00000000000..c68218dfff8
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-1.C
@@ -0,0 +1,572 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul(vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul(vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul(vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul(vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul(vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul(vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul(vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul(vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul(vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul(vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul(vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul(vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul(vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul(vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul(vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul(vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul(vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul(vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul(vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul(vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul(vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul(vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul(vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul(vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul(vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul(vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul(vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul(vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul(vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul(vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul(vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul(vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul(vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul(vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul(vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul(vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul(vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul(vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8mf8_t test___riscv_vmul(vbool64_t mask,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul(vbool32_t mask,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul(vbool16_t mask,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul(vbool8_t mask,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul(vbool4_t mask,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul(vbool2_t mask,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul(vbool1_t mask,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul(vbool64_t mask,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul(vbool32_t mask,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul(vbool16_t mask,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul(vbool8_t mask,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul(vbool4_t mask,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul(vbool2_t mask,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul(vbool64_t mask,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul(vbool32_t mask,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul(vbool16_t mask,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul(vbool8_t mask,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul(vbool4_t mask,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul(vbool64_t mask,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul(vbool32_t mask,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul(vbool16_t mask,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul(vbool8_t mask,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vbool64_t mask,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vbool32_t mask,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vbool16_t mask,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul(vbool8_t mask,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul(vbool4_t mask,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul(vbool2_t mask,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul(vbool1_t mask,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vbool64_t mask,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vbool32_t mask,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul(vbool16_t mask,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul(vbool8_t mask,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul(vbool4_t mask,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul(vbool2_t mask,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vbool64_t mask,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul(vbool32_t mask,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul(vbool16_t mask,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul(vbool8_t mask,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul(vbool4_t mask,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul(vbool64_t mask,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul(vbool32_t mask,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul(vbool16_t mask,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul(vbool8_t mask,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */
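
Note on the two vmul.vv counts of 8: they account for the e64 overloads on rv32, where a 64-bit scalar op2 does not fit in a single 32-bit x-register, so the vx intrinsic is presumably expanded by splatting op2 into a vector and multiplying with vmul.vv. The four signed plus four unsigned e64 LMULs then give 8 matches each for the unmasked and masked forms. Illustrative sketch:

#include "riscv_vector.h"

vint64m1_t mul_i64 (vint64m1_t op1, int64_t op2, size_t vl)
{
    /* On rv32 the int64_t scalar cannot be passed in one GPR, so
       the expected code splats op2 and uses vmul.vv, not vmul.vx.  */
    return __riscv_vmul (op1, op2, vl);
}
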
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-2.C
new file mode 100644
index 00000000000..f0403fb4c9a
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-2.C
@@ -0,0 +1,572 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul(vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul(vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul(vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul(vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul(vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul(vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul(vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul(vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul(vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul(vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul(vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul(vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul(vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul(vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul(vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul(vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul(vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul(vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul(vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul(vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul(vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul(vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul(vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul(vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul(vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul(vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul(vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul(vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul(vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul(vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul(vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul(vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul(vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul(vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul(vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul(vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul(vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul(vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8mf8_t test___riscv_vmul(vbool64_t mask,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul(vbool32_t mask,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul(vbool16_t mask,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul(vbool8_t mask,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul(vbool4_t mask,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul(vbool2_t mask,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul(vbool1_t mask,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul(vbool64_t mask,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul(vbool32_t mask,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul(vbool16_t mask,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul(vbool8_t mask,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul(vbool4_t mask,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul(vbool2_t mask,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul(vbool64_t mask,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul(vbool32_t mask,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul(vbool16_t mask,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul(vbool8_t mask,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul(vbool4_t mask,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul(vbool64_t mask,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul(vbool32_t mask,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul(vbool16_t mask,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul(vbool8_t mask,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vbool64_t mask,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vbool32_t mask,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vbool16_t mask,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul(vbool8_t mask,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul(vbool4_t mask,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul(vbool2_t mask,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul(vbool1_t mask,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vbool64_t mask,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vbool32_t mask,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul(vbool16_t mask,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul(vbool8_t mask,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul(vbool4_t mask,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul(vbool2_t mask,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vbool64_t mask,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul(vbool32_t mask,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul(vbool16_t mask,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul(vbool8_t mask,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul(vbool4_t mask,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul(vbool64_t mask,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul(vbool32_t mask,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul(vbool16_t mask,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul(vbool8_t mask,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 8 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-3.C
new file mode 100644
index 00000000000..e1625644f16
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-3.C
@@ -0,0 +1,572 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul(vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul(vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul(vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul(vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul(vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul(vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul(vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul(vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul(vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul(vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul(vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul(vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul(vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul(vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul(vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul(vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul(vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul(vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul(vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul(vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul(vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul(vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul(vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul(vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul(vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul(vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul(vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul(vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul(vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul(vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul(vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul(vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul(vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul(vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul(vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul(vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul(vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul(vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8mf8_t test___riscv_vmul(vbool64_t mask,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul(vbool32_t mask,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul(vbool16_t mask,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul(vbool8_t mask,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul(vbool4_t mask,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul(vbool2_t mask,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul(vbool1_t mask,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul(vbool64_t mask,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul(vbool32_t mask,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul(vbool16_t mask,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul(vbool8_t mask,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul(vbool4_t mask,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul(vbool2_t mask,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul(vbool64_t mask,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul(vbool32_t mask,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul(vbool16_t mask,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul(vbool8_t mask,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul(vbool4_t mask,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul(vbool64_t mask,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul(vbool32_t mask,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul(vbool16_t mask,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul(vbool8_t mask,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vbool64_t mask,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vbool32_t mask,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vbool16_t mask,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul(vbool8_t mask,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul(vbool4_t mask,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul(vbool2_t mask,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul(vbool1_t mask,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vbool64_t mask,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vbool32_t mask,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul(vbool16_t mask,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul(vbool8_t mask,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul(vbool4_t mask,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul(vbool2_t mask,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vbool64_t mask,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul(vbool32_t mask,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul(vbool16_t mask,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul(vbool8_t mask,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul(vbool4_t mask,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul(vbool64_t mask,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul(vbool32_t mask,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul(vbool16_t mask,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul(vbool8_t mask,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 8 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */
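A note on the expected rv32 codegen above: a 64-bit scalar operand cannot be
passed to vmul.vx in a single GPR on rv32, so for the e64 cases the scans
count vmul.vv instead -- 8 unmasked and 8 masked hits, covering the four
signed and four unsigned LMUL variants. A minimal standalone sketch of that
fallback, assuming the overloaded C++ API resolves as in the tests above
(illustrative only, not part of the patch):

#include "riscv_vector.h"

vint64m1_t mul_i64_scalar (vint64m1_t v, int64_t x, size_t vl)
{
    /* On rv32 the int64_t operand is expected to be splatted into a
       vector register first, so this should compile to vmul.vv rather
       than vmul.vx; on rv64 the same call can use vmul.vx directly.  */
    return __riscv_vmul (v, x, vl);
}
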
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-1.C
new file mode 100644
index 00000000000..badbd3fd2f7
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-1.C
@@ -0,0 +1,578 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul(vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul(vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul(vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul(vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul(vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul(vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul(vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul(vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul(vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul(vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul(vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul(vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul(vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul(vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul(vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul(vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul(vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul(vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul(vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul(vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul(vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul(vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul(vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul(vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul(vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul(vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul(vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul(vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul(vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul(vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul(vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul(vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul(vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul(vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul(vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul(vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul(vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul(vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8mf8_t test___riscv_vmul(vbool64_t mask,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul(vbool32_t mask,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul(vbool16_t mask,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul(vbool8_t mask,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul(vbool4_t mask,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul(vbool2_t mask,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul(vbool1_t mask,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul(vbool64_t mask,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul(vbool32_t mask,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul(vbool16_t mask,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul(vbool8_t mask,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul(vbool4_t mask,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul(vbool2_t mask,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul(vbool64_t mask,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul(vbool32_t mask,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul(vbool16_t mask,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul(vbool8_t mask,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul(vbool4_t mask,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul(vbool64_t mask,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul(vbool32_t mask,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul(vbool16_t mask,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul(vbool8_t mask,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vbool64_t mask,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vbool32_t mask,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vbool16_t mask,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul(vbool8_t mask,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul(vbool4_t mask,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul(vbool2_t mask,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul(vbool1_t mask,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vbool64_t mask,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vbool32_t mask,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul(vbool16_t mask,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul(vbool8_t mask,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul(vbool4_t mask,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul(vbool2_t mask,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vbool64_t mask,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul(vbool32_t mask,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul(vbool16_t mask,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul(vbool8_t mask,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul(vbool4_t mask,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul(vbool64_t mask,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul(vbool32_t mask,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul(vbool16_t mask,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul(vbool8_t mask,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
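In vmul_vx_rv64-1.C above, a 64-bit GPR is available, so every element width
through e64 can take the scalar operand directly: each sew/lmul scan pattern
is expected twice (once signed, once unsigned), unmasked and masked, and each
vsetvli reads the runtime vl from a register. The overloaded calls are
assumed to resolve to the explicit-suffix intrinsics; a hypothetical
equivalent of one unmasked/masked pair, following the RVV intrinsic naming
convention (illustrative only):

#include "riscv_vector.h"

vint32m1_t mul_explicit (vint32m1_t op1, int32_t op2, size_t vl)
{
    /* Explicit form of the overloaded __riscv_vmul(op1,op2,vl).  */
    return __riscv_vmul_vx_i32m1 (op1, op2, vl);
}

vint32m1_t mul_explicit_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl)
{
    /* Explicit masked form of __riscv_vmul(mask,op1,op2,vl).  */
    return __riscv_vmul_vx_i32m1_m (mask, op1, op2, vl);
}
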
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-2.C
new file mode 100644
index 00000000000..6c36d54f448
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-2.C
@@ -0,0 +1,578 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul(vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul(vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul(vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul(vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul(vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul(vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul(vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul(vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul(vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul(vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul(vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul(vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul(vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul(vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul(vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul(vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul(vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul(vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul(vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul(vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul(vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul(vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul(vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul(vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul(vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul(vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul(vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul(vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul(vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul(vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul(vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul(vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul(vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul(vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul(vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul(vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul(vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul(vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8mf8_t test___riscv_vmul(vbool64_t mask,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul(vbool32_t mask,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul(vbool16_t mask,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul(vbool8_t mask,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul(vbool4_t mask,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul(vbool2_t mask,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul(vbool1_t mask,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul(vbool64_t mask,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul(vbool32_t mask,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul(vbool16_t mask,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul(vbool8_t mask,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul(vbool4_t mask,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul(vbool2_t mask,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul(vbool64_t mask,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul(vbool32_t mask,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul(vbool16_t mask,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul(vbool8_t mask,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul(vbool4_t mask,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul(vbool64_t mask,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul(vbool32_t mask,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul(vbool16_t mask,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul(vbool8_t mask,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vbool64_t mask,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vbool32_t mask,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vbool16_t mask,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul(vbool8_t mask,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul(vbool4_t mask,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul(vbool2_t mask,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul(vbool1_t mask,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vbool64_t mask,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vbool32_t mask,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul(vbool16_t mask,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul(vbool8_t mask,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul(vbool4_t mask,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul(vbool2_t mask,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vbool64_t mask,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul(vbool32_t mask,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul(vbool16_t mask,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul(vbool8_t mask,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul(vbool4_t mask,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul(vbool64_t mask,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul(vbool32_t mask,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul(vbool16_t mask,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul(vbool8_t mask,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
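The contrast between vmul_vx_rv64-2.C above and vmul_vx_rv64-3.C below is the
constant passed as vl: 31 is the largest value that fits vsetivli's 5-bit
unsigned immediate, so rv64-2 scans for "vsetivli zero,31,...", while 32 is
one past that range, so rv64-3 is expected to fall back to "vsetvli" with the
length materialized in a register. A minimal sketch of that boundary,
assuming the same codegen rules the scans above encode (illustrative only):

#include "riscv_vector.h"

vint32m1_t mul_vl31 (vint32m1_t v, int32_t x)
{
    return __riscv_vmul (v, x, 31);  /* expected: vsetivli zero,31,e32,m1,...  */
}

vint32m1_t mul_vl32 (vint32m1_t v, int32_t x)
{
    return __riscv_vmul (v, x, 32);  /* expected: 32 loaded into a GPR,
                                        then vsetvli zero,<reg>,e32,m1,...  */
}
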
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-3.C
new file mode 100644
index 00000000000..c3f098a12e0
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-3.C
@@ -0,0 +1,578 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul(vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul(vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul(vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul(vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul(vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul(vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul(vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul(vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul(vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul(vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul(vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul(vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul(vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul(vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul(vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul(vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul(vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul(vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul(vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul(vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul(vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul(vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul(vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul(vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul(vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul(vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul(vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul(vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul(vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul(vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul(vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul(vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul(vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul(vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul(vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul(vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul(vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul(vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8mf8_t test___riscv_vmul(vbool64_t mask,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul(vbool32_t mask,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul(vbool16_t mask,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul(vbool8_t mask,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul(vbool4_t mask,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul(vbool2_t mask,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul(vbool1_t mask,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul(vbool64_t mask,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul(vbool32_t mask,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul(vbool16_t mask,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul(vbool8_t mask,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul(vbool4_t mask,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul(vbool2_t mask,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul(vbool64_t mask,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul(vbool32_t mask,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul(vbool16_t mask,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul(vbool8_t mask,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul(vbool4_t mask,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul(vbool64_t mask,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul(vbool32_t mask,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul(vbool16_t mask,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul(vbool8_t mask,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vbool64_t mask,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vbool32_t mask,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vbool16_t mask,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul(vbool8_t mask,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul(vbool4_t mask,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul(vbool2_t mask,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul(vbool1_t mask,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vbool64_t mask,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vbool32_t mask,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul(vbool16_t mask,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul(vbool8_t mask,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul(vbool4_t mask,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul(vbool2_t mask,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vbool64_t mask,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul(vbool32_t mask,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul(vbool16_t mask,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul(vbool8_t mask,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul(vbool4_t mask,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul(vbool64_t mask,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul(vbool32_t mask,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul(vbool16_t mask,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul(vbool8_t mask,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+
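+/* vl is the constant 32 in every call above; it does not fit the 5-bit
+   immediate of vsetivli, so each VL update below is matched as a vsetvli
+   taking the length from a scalar register.  */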
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-1.C
new file mode 100644
index 00000000000..dbf865f8ee8
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-1.C
@@ -0,0 +1,289 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
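+/* Each wrapper below forwards its runtime vl, so VL is expected to be set
+   by a vsetvli carrying the tail-undisturbed ("tu") policy.  On rv32 the
+   eight e64 wrappers should fall back to vmul.vv, since a 64-bit scalar
+   operand does not fit in a single 32-bit GPR (see the final scan
+   pattern).  */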
+
+vint8mf8_t test___riscv_vmul_tu(vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul_tu(vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul_tu(vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul_tu(vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul_tu(vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul_tu(vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul_tu(vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul_tu(vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul_tu(vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul_tu(vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul_tu(vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul_tu(vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul_tu(vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul_tu(vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul_tu(vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul_tu(vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul_tu(vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul_tu(vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul_tu(vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul_tu(vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul_tu(vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul_tu(vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tu(vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tu(vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tu(vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul_tu(vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul_tu(vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul_tu(vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul_tu(vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tu(vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tu(vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul_tu(vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul_tu(vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul_tu(vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul_tu(vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tu(vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul_tu(vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul_tu(vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul_tu(vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul_tu(vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul_tu(vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul_tu(vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul_tu(vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul_tu(vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 8 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-2.C
new file mode 100644
index 00000000000..51152cae795
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-2.C
@@ -0,0 +1,289 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
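+/* Same wrappers as vmul_vx_tu_rv32-1.C, but with vl fixed at 31, the
+   largest value that fits the 5-bit immediate of vsetivli; the scan
+   patterns therefore expect "vsetivli zero,31" rather than a vsetvli.  */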
+
+vint8mf8_t test___riscv_vmul_tu(vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul_tu(vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul_tu(vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul_tu(vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul_tu(vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul_tu(vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul_tu(vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul_tu(vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul_tu(vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul_tu(vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul_tu(vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul_tu(vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul_tu(vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul_tu(vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul_tu(vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul_tu(vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul_tu(vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul_tu(vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul_tu(vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul_tu(vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul_tu(vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul_tu(vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tu(vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tu(vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tu(vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul_tu(vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul_tu(vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul_tu(vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul_tu(vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tu(vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tu(vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul_tu(vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul_tu(vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul_tu(vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul_tu(vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tu(vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul_tu(vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul_tu(vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul_tu(vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul_tu(vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul_tu(vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul_tu(vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul_tu(vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul_tu(vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 8 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-3.C
new file mode 100644
index 00000000000..e2abfddc13d
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-3.C
@@ -0,0 +1,289 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
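+/* Here vl is fixed at 32, which no longer fits the 5-bit vsetivli
+   immediate, so VL must again be set through a vsetvli that reads the
+   length from a scalar register.  */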
+
+vint8mf8_t test___riscv_vmul_tu(vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul_tu(vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul_tu(vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul_tu(vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul_tu(vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul_tu(vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul_tu(vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul_tu(vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul_tu(vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul_tu(vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul_tu(vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul_tu(vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul_tu(vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul_tu(vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul_tu(vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul_tu(vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul_tu(vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul_tu(vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul_tu(vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul_tu(vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul_tu(vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul_tu(vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tu(vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tu(vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tu(vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul_tu(vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul_tu(vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul_tu(vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul_tu(vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tu(vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tu(vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul_tu(vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul_tu(vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul_tu(vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul_tu(vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tu(vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul_tu(vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul_tu(vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul_tu(vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul_tu(vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul_tu(vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul_tu(vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul_tu(vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul_tu(vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 8 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-1.C
new file mode 100644
index 00000000000..de74cec4747
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-1.C
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
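+/* rv64 version of the tail-undisturbed wrappers with a runtime vl.  Since
+   rv64 GPRs hold 64-bit scalars, the e64 cases keep the vmul.vx form and
+   are matched by their own e64 scan patterns below.  */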
+
+vint8mf8_t test___riscv_vmul_tu(vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul_tu(vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul_tu(vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul_tu(vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul_tu(vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul_tu(vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul_tu(vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul_tu(vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul_tu(vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul_tu(vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul_tu(vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul_tu(vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul_tu(vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul_tu(vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul_tu(vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul_tu(vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul_tu(vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul_tu(vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul_tu(vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul_tu(vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul_tu(vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul_tu(vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tu(vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tu(vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tu(vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul_tu(vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul_tu(vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul_tu(vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul_tu(vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tu(vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tu(vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul_tu(vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul_tu(vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul_tu(vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul_tu(vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tu(vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul_tu(vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul_tu(vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul_tu(vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul_tu(vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul_tu(vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul_tu(vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul_tu(vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul_tu(vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-2.C
new file mode 100644
index 00000000000..fa687276ef1
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-2.C
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
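+/* rv64 version with vl fixed at 31: every VL update should use
+   "vsetivli zero,31" with the tail-undisturbed policy, including the
+   e64 cases.  */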
+
+vint8mf8_t test___riscv_vmul_tu(vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul_tu(vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul_tu(vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul_tu(vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul_tu(vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul_tu(vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul_tu(vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul_tu(vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul_tu(vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul_tu(vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul_tu(vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul_tu(vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul_tu(vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul_tu(vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul_tu(vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul_tu(vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul_tu(vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul_tu(vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul_tu(vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul_tu(vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul_tu(vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul_tu(vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tu(vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tu(vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tu(vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul_tu(vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul_tu(vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul_tu(vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul_tu(vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tu(vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tu(vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul_tu(vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul_tu(vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul_tu(vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul_tu(vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tu(vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul_tu(vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul_tu(vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul_tu(vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul_tu(vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul_tu(vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul_tu(vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul_tu(vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul_tu(vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-3.C
new file mode 100644
index 00000000000..0865b9b543f
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-3.C
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tu(vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul_tu(vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul_tu(vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul_tu(vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul_tu(vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul_tu(vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul_tu(vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul_tu(vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul_tu(vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul_tu(vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul_tu(vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul_tu(vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul_tu(vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul_tu(vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul_tu(vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul_tu(vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul_tu(vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul_tu(vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul_tu(vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul_tu(vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul_tu(vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul_tu(vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tu(vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tu(vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tu(vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul_tu(vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul_tu(vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul_tu(vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul_tu(vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tu(vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tu(vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul_tu(vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul_tu(vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul_tu(vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul_tu(vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tu(vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul_tu(vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul_tu(vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul_tu(vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul_tu(vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul_tu(vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul_tu(vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul_tu(vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul_tu(vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+
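+/* Here the constant AVL is 32, which no longer fits the 5-bit uimm of
+   vsetivli, so the value is expected to be materialized in a scalar
+   register and the scans below look for vsetvli instead.  */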
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-1.C
new file mode 100644
index 00000000000..d9292b768e5
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-1.C
@@ -0,0 +1,289 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+
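+/* The _tum tests add a mask operand, so the vmul.vx scans below also
+   require the ",v0.t" masked suffix.  Only e8 through e32 are checked
+   as vmul.vx: on rv32 a 64-bit scalar op2 does not fit in one GPR, and
+   the final scan expects the eight e64 cases (four signed, four
+   unsigned) to fall back to masked vmul.vv, presumably after
+   broadcasting the 64-bit scalar into a vector.  */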
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-2.C
new file mode 100644
index 00000000000..691d0863091
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-2.C
@@ -0,0 +1,289 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
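+/* Same masked form as the -1 variant, but with the constant AVL 31,
+   which fits the vsetivli immediate, so vsetivli is expected in place
+   of vsetvli.  */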
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-3.C
new file mode 100644
index 00000000000..113f1f7c159
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-3.C
@@ -0,0 +1,289 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+
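+/* AVL 32 again exceeds the vsetivli immediate range, so even with a
+   constant AVL the scans below expect vsetvli.  */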
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-1.C
new file mode 100644
index 00000000000..3d7b6879ebb
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-1.C
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+
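+/* On rv64 a 64-bit scalar fits in a GPR, so unlike the rv32 variants
+   the e64 cases are expected to use vmul.vx directly; the scans below
+   therefore cover e8 through e64 with no vmul.vv fallback.  */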
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-2.C
new file mode 100644
index 00000000000..853580b8eb2
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-2.C
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
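(The rv64-2 variant above hard-codes the AVL to the constant 31, which fits
the 5-bit unsigned immediate of vsetivli, so its checks expect vsetivli; the
rv64-3 variant below passes 32, which does not fit, so the AVL has to be
materialized in a scalar register and vsetvli is expected instead. A minimal
sketch of the distinction, as a standalone translation unit rather than the
tests themselves, with illustrative function names:

    #include "riscv_vector.h"

    vint8m1_t avl31 (vbool8_t m, vint8m1_t d, vint8m1_t a, int8_t b)
    {
      /* AVL 31 fits vsetivli's uimm5: expect "vsetivli zero,31,...".  */
      return __riscv_vmul_tum (m, d, a, b, 31);
    }

    vint8m1_t avl32 (vbool8_t m, vint8m1_t d, vint8m1_t a, int8_t b)
    {
      /* AVL 32 exceeds uimm5: expect "li aN,32" then "vsetvli zero,aN,...".  */
      return __riscv_vmul_tum (m, d, a, b, 32);
    }
)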
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-3.C
new file mode 100644
index 00000000000..5fed85d8915
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-3.C
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
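(Both _tum files accept either mask policy in their vsetvli checks:
__riscv_vmul_tum requests tail-undisturbed with a mask-agnostic policy, so
the compiler may encode either ma or mu and the regexes use m[au]. The _tumu
tests that follow request mask-undisturbed, so their regexes pin mu exactly.
Quoting the two directive shapes side by side, elided with "...":

    /* _tum: mask-agnostic, either encoding may appear.  */
    {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+...}

    /* _tumu: mask-undisturbed is mandatory.  */
    {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+...}
)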
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-1.C
new file mode 100644
index 00000000000..74ec5b740ec
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-1.C
@@ -0,0 +1,289 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */
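(This rv32 file checks only the e8 through e32 vsetvli+vmul.vx pairs. The
eight SEW=64 overloads (vint64m1_t through vint64m8_t plus the unsigned
counterparts) cannot pass a 64-bit scalar in a single GPR on rv32, so the
scalar is expected to be splatted into a vector and multiplied with vmul.vv,
which is what the final scan-assembler-times count of 8 verifies. A hedged
sketch of one such overload, assuming the usual splat-then-vv lowering:

    vint64m1_t e64_case (vbool64_t m, vint64m1_t d, vint64m1_t a,
                         int64_t b, size_t vl)
    {
      /* On rv32, b occupies a GPR pair, so no vmul.vx form exists;
         expect b to be broadcast into a vector (e.g. via a zero-stride
         vlse64.v from a stack slot) followed by "vmul.vv ...,v0.t".  */
      return __riscv_vmul_tumu (m, d, a, b, vl);
    }
)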
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-2.C
new file mode 100644
index 00000000000..a6857f1d601
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-2.C
@@ -0,0 +1,289 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-3.C
new file mode 100644
index 00000000000..a07bd571ea0
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-3.C
@@ -0,0 +1,289 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-1.C
new file mode 100644
index 00000000000..46901e0ae2a
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-1.C
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-2.C
new file mode 100644
index 00000000000..68c768234b1
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-2.C
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-3.C
new file mode 100644
index 00000000000..75143a62606
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-3.C
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */
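
For reference, the tail-undisturbed/mask-undisturbed behaviour that every
overload above exercises can be modelled by a short scalar loop. This is
only an illustrative sketch of the RVV TUMU policy semantics, not code
from the patch; the names vmul_vx_tumu_ref, vl and vlmax are invented
for the example.

#include <cstddef>
#include <cstdint>

/* Scalar model of a masked vmul.vx under the TUMU policy: active
   elements are multiplied by the scalar, while masked-off elements
   (mask undisturbed) and tail elements (tail undisturbed) keep the
   values of the merge operand, i.e. the old destination.  */
void vmul_vx_tumu_ref(const bool *mask, const int8_t *merge,
                      const int8_t *op1, int8_t op2,
                      size_t vl, size_t vlmax, int8_t *out)
{
    for (size_t i = 0; i < vlmax; ++i)
    {
        if (i >= vl)
            out[i] = merge[i];              /* tail undisturbed */
        else if (!mask[i])
            out[i] = merge[i];              /* mask undisturbed */
        else
            out[i] = (int8_t)(op1[i] * op2); /* active element */
    }
}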