RISC-V: Add vle.v C API intrinsics testcases

Message ID 20230119141249.293487-1-juzhe.zhong@rivai.ai
State Committed
Commit 931a042f7e6c3ee0741cf4365b15f4e27d364209
Series RISC-V: Add vle.v C API intrinsics testcases

Commit Message

钟居哲 (Ju-Zhe Zhong) Jan. 19, 2023, 2:12 p.m. UTC
  From: Ju-Zhe Zhong <juzhe.zhong@rivai.ai>

gcc/testsuite/ChangeLog:

        * gcc.target/riscv/rvv/base/vle-1.c: New test.
        * gcc.target/riscv/rvv/base/vle-2.c: New test.
        * gcc.target/riscv/rvv/base/vle-3.c: New test.
        * gcc.target/riscv/rvv/base/vle_m-1.c: New test.
        * gcc.target/riscv/rvv/base/vle_m-2.c: New test.
        * gcc.target/riscv/rvv/base/vle_m-3.c: New test.
        * gcc.target/riscv/rvv/base/vle_mu-1.c: New test.
        * gcc.target/riscv/rvv/base/vle_mu-2.c: New test.
        * gcc.target/riscv/rvv/base/vle_mu-3.c: New test.
        * gcc.target/riscv/rvv/base/vle_tu-1.c: New test.
        * gcc.target/riscv/rvv/base/vle_tu-2.c: New test.
        * gcc.target/riscv/rvv/base/vle_tu-3.c: New test.
        * gcc.target/riscv/rvv/base/vle_tum-1.c: New test.
        * gcc.target/riscv/rvv/base/vle_tum-2.c: New test.
        * gcc.target/riscv/rvv/base/vle_tum-3.c: New test.
        * gcc.target/riscv/rvv/base/vle_tumu-1.c: New test.
        * gcc.target/riscv/rvv/base/vle_tumu-2.c: New test.
        * gcc.target/riscv/rvv/base/vle_tumu-3.c: New test.
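
These tests cover the vle8/vle16/vle32/vle64 intrinsics across every
supported element type and LMUL, in the plain form and the _m, _mu, _tu,
_tum and _tumu policy variants.  Each variant comes in three AVL flavors
(the -1/-2/-3 files), so the scan-assembler patterns can check how the
compiler selects the vsetvli/vsetivli configuration instruction: a
run-time vl, the constant 31, and the constant 32.  A minimal sketch of
the three flavors for one type follows; the function names (load_vl and
friends) are illustrative only, while the intrinsic is the same one the
tests below exercise:

#include "riscv_vector.h"

/* -1 files: run-time AVL, expected to compile to
   "vsetvli zero,<reg>,e8,mf8,..." plus the vle8.v load.  */
vint8mf8_t
load_vl (int8_t *base, size_t vl)
{
  return __riscv_vle8_v_i8mf8 (base, vl);
}

/* -2 files: a constant AVL of 31 still fits the 5-bit immediate
   of vsetivli, so "vsetivli zero,31,..." is expected.  */
vint8mf8_t
load_31 (int8_t *base)
{
  return __riscv_vle8_v_i8mf8 (base, 31);
}

/* -3 files: 32 no longer fits vsetivli's uimm5 field, so the
   compiler must materialize the AVL in a register and fall back
   to vsetvli.  */
vint8mf8_t
load_32 (int8_t *base)
{
  return __riscv_vle8_v_i8mf8 (base, 32);
}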

---
 .../gcc.target/riscv/rvv/base/vle-1.c         | 345 ++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vle-2.c         | 345 ++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vle-3.c         | 345 ++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vle_m-1.c       | 345 ++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vle_m-2.c       | 345 ++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vle_m-3.c       | 345 ++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vle_mu-1.c      | 344 +++++++++++++++++
 .../gcc.target/riscv/rvv/base/vle_mu-2.c      | 344 +++++++++++++++++
 .../gcc.target/riscv/rvv/base/vle_mu-3.c      | 344 +++++++++++++++++
 .../gcc.target/riscv/rvv/base/vle_tu-1.c      | 345 ++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vle_tu-2.c      | 345 ++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vle_tu-3.c      | 345 ++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vle_tum-1.c     | 345 ++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vle_tum-2.c     | 345 ++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vle_tum-3.c     | 345 ++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vle_tumu-1.c    | 345 ++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vle_tumu-2.c    | 345 ++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vle_tumu-3.c    | 345 ++++++++++++++++++
 18 files changed, 6207 insertions(+)
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/vle-1.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/vle-2.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/vle-3.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/vle_m-1.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/vle_m-2.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/vle_m-3.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/vle_mu-1.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/vle_mu-2.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/vle_mu-3.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/vle_tu-1.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/vle_tu-2.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/vle_tu-3.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/vle_tum-1.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/vle_tum-2.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/vle_tum-3.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/vle_tumu-1.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/vle_tumu-2.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/vle_tumu-3.c
  

Patch

diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vle-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vle-1.c
new file mode 100644
index 00000000000..6335e7bbe4d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vle-1.c
@@ -0,0 +1,345 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t 
+test___riscv_vle8_v_i8mf8(int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf8(base,vl);
+}
+
+vint8mf4_t 
+test___riscv_vle8_v_i8mf4(int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf4(base,vl);
+}
+
+vint8mf2_t 
+test___riscv_vle8_v_i8mf2(int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf2(base,vl);
+}
+
+vint8m1_t 
+test___riscv_vle8_v_i8m1(int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m1(base,vl);
+}
+
+vint8m2_t 
+test___riscv_vle8_v_i8m2(int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m2(base,vl);
+}
+
+vint8m4_t 
+test___riscv_vle8_v_i8m4(int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m4(base,vl);
+}
+
+vint8m8_t 
+test___riscv_vle8_v_i8m8(int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m8(base,vl);
+}
+
+vuint8mf8_t 
+test___riscv_vle8_v_u8mf8(uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf8(base,vl);
+}
+
+vuint8mf4_t 
+test___riscv_vle8_v_u8mf4(uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf4(base,vl);
+}
+
+vuint8mf2_t 
+test___riscv_vle8_v_u8mf2(uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf2(base,vl);
+}
+
+vuint8m1_t 
+test___riscv_vle8_v_u8m1(uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m1(base,vl);
+}
+
+vuint8m2_t 
+test___riscv_vle8_v_u8m2(uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m2(base,vl);
+}
+
+vuint8m4_t 
+test___riscv_vle8_v_u8m4(uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m4(base,vl);
+}
+
+vuint8m8_t 
+test___riscv_vle8_v_u8m8(uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m8(base,vl);
+}
+
+vint16mf4_t
+test___riscv_vle16_v_i16mf4(int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf4(base,vl);
+}
+
+vint16mf2_t
+test___riscv_vle16_v_i16mf2(int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf2(base,vl);
+}
+
+vint16m1_t
+test___riscv_vle16_v_i16m1(int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m1(base,vl);
+}
+
+vint16m2_t
+test___riscv_vle16_v_i16m2(int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m2(base,vl);
+}
+
+vint16m4_t
+test___riscv_vle16_v_i16m4(int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m4(base,vl);
+}
+
+vint16m8_t
+test___riscv_vle16_v_i16m8(int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m8(base,vl);
+}
+
+vuint16mf4_t
+test___riscv_vle16_v_u16mf4(uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf4(base,vl);
+}
+
+vuint16mf2_t
+test___riscv_vle16_v_u16mf2(uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf2(base,vl);
+}
+
+vuint16m1_t
+test___riscv_vle16_v_u16m1(uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m1(base,vl);
+}
+
+vuint16m2_t
+test___riscv_vle16_v_u16m2(uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m2(base,vl);
+}
+
+vuint16m4_t
+test___riscv_vle16_v_u16m4(uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m4(base,vl);
+}
+
+vuint16m8_t
+test___riscv_vle16_v_u16m8(uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m8(base,vl);
+}
+
+vint32mf2_t
+test___riscv_vle32_v_i32mf2(int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32mf2(base,vl);
+}
+
+vint32m1_t
+test___riscv_vle32_v_i32m1(int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m1(base,vl);
+}
+
+vint32m2_t
+test___riscv_vle32_v_i32m2(int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m2(base,vl);
+}
+
+vint32m4_t
+test___riscv_vle32_v_i32m4(int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m4(base,vl);
+}
+
+vint32m8_t
+test___riscv_vle32_v_i32m8(int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m8(base,vl);
+}
+
+vuint32mf2_t
+test___riscv_vle32_v_u32mf2(uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32mf2(base,vl);
+}
+
+vuint32m1_t
+test___riscv_vle32_v_u32m1(uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m1(base,vl);
+}
+
+vuint32m2_t
+test___riscv_vle32_v_u32m2(uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m2(base,vl);
+}
+
+vuint32m4_t
+test___riscv_vle32_v_u32m4(uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m4(base,vl);
+}
+
+vuint32m8_t
+test___riscv_vle32_v_u32m8(uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m8(base,vl);
+}
+
+vfloat32mf2_t
+test___riscv_vle32_v_f32mf2(float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32mf2(base,vl);
+}
+
+vfloat32m1_t
+test___riscv_vle32_v_f32m1(float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m1(base,vl);
+}
+
+vfloat32m2_t
+test___riscv_vle32_v_f32m2(float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m2(base,vl);
+}
+
+vfloat32m4_t
+test___riscv_vle32_v_f32m4(float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m4(base,vl);
+}
+
+vfloat32m8_t
+test___riscv_vle32_v_f32m8(float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m8(base,vl);
+}
+vint64m1_t
+test___riscv_vle64_v_i64m1(int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m1(base,vl);
+}
+
+vint64m2_t
+test___riscv_vle64_v_i64m2(int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m2(base,vl);
+}
+
+vint64m4_t
+test___riscv_vle64_v_i64m4(int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m4(base,vl);
+}
+
+vint64m8_t
+test___riscv_vle64_v_i64m8(int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m8(base,vl);
+}
+
+vuint64m1_t
+test___riscv_vle64_v_u64m1(uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m1(base,vl);
+}
+
+vuint64m2_t
+test___riscv_vle64_v_u64m2(uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m2(base,vl);
+}
+
+vuint64m4_t
+test___riscv_vle64_v_u64m4(uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m4(base,vl);
+}
+
+vuint64m8_t
+test___riscv_vle64_v_u64m8(uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m8(base,vl);
+}
+
+vfloat64m1_t
+test___riscv_vle64_v_f64m1(double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m1(base,vl);
+}
+
+vfloat64m2_t
+test___riscv_vle64_v_f64m2(double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m2(base,vl);
+}
+
+vfloat64m4_t
+test___riscv_vle64_v_f64m4(double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m4(base,vl);
+}
+
+vfloat64m8_t
+test___riscv_vle64_v_f64m8(double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m8(base,vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vle-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vle-2.c
new file mode 100644
index 00000000000..f47137b38bf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vle-2.c
@@ -0,0 +1,345 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t 
+test___riscv_vle8_v_i8mf8(int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf8(base,31);
+}
+
+vint8mf4_t 
+test___riscv_vle8_v_i8mf4(int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf4(base,31);
+}
+
+vint8mf2_t 
+test___riscv_vle8_v_i8mf2(int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf2(base,31);
+}
+
+vint8m1_t 
+test___riscv_vle8_v_i8m1(int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m1(base,31);
+}
+
+vint8m2_t 
+test___riscv_vle8_v_i8m2(int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m2(base,31);
+}
+
+vint8m4_t 
+test___riscv_vle8_v_i8m4(int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m4(base,31);
+}
+
+vint8m8_t 
+test___riscv_vle8_v_i8m8(int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m8(base,31);
+}
+
+vuint8mf8_t 
+test___riscv_vle8_v_u8mf8(uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf8(base,31);
+}
+
+vuint8mf4_t 
+test___riscv_vle8_v_u8mf4(uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf4(base,31);
+}
+
+vuint8mf2_t 
+test___riscv_vle8_v_u8mf2(uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf2(base,31);
+}
+
+vuint8m1_t 
+test___riscv_vle8_v_u8m1(uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m1(base,31);
+}
+
+vuint8m2_t 
+test___riscv_vle8_v_u8m2(uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m2(base,31);
+}
+
+vuint8m4_t 
+test___riscv_vle8_v_u8m4(uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m4(base,31);
+}
+
+vuint8m8_t 
+test___riscv_vle8_v_u8m8(uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m8(base,31);
+}
+
+vint16mf4_t
+test___riscv_vle16_v_i16mf4(int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf4(base,31);
+}
+
+vint16mf2_t
+test___riscv_vle16_v_i16mf2(int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf2(base,31);
+}
+
+vint16m1_t
+test___riscv_vle16_v_i16m1(int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m1(base,31);
+}
+
+vint16m2_t
+test___riscv_vle16_v_i16m2(int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m2(base,31);
+}
+
+vint16m4_t
+test___riscv_vle16_v_i16m4(int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m4(base,31);
+}
+
+vint16m8_t
+test___riscv_vle16_v_i16m8(int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m8(base,31);
+}
+
+vuint16mf4_t
+test___riscv_vle16_v_u16mf4(uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf4(base,31);
+}
+
+vuint16mf2_t
+test___riscv_vle16_v_u16mf2(uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf2(base,31);
+}
+
+vuint16m1_t
+test___riscv_vle16_v_u16m1(uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m1(base,31);
+}
+
+vuint16m2_t
+test___riscv_vle16_v_u16m2(uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m2(base,31);
+}
+
+vuint16m4_t
+test___riscv_vle16_v_u16m4(uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m4(base,31);
+}
+
+vuint16m8_t
+test___riscv_vle16_v_u16m8(uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m8(base,31);
+}
+
+vint32mf2_t
+test___riscv_vle32_v_i32mf2(int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32mf2(base,31);
+}
+
+vint32m1_t
+test___riscv_vle32_v_i32m1(int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m1(base,31);
+}
+
+vint32m2_t
+test___riscv_vle32_v_i32m2(int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m2(base,31);
+}
+
+vint32m4_t
+test___riscv_vle32_v_i32m4(int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m4(base,31);
+}
+
+vint32m8_t
+test___riscv_vle32_v_i32m8(int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m8(base,31);
+}
+
+vuint32mf2_t
+test___riscv_vle32_v_u32mf2(uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32mf2(base,31);
+}
+
+vuint32m1_t
+test___riscv_vle32_v_u32m1(uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m1(base,31);
+}
+
+vuint32m2_t
+test___riscv_vle32_v_u32m2(uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m2(base,31);
+}
+
+vuint32m4_t
+test___riscv_vle32_v_u32m4(uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m4(base,31);
+}
+
+vuint32m8_t
+test___riscv_vle32_v_u32m8(uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m8(base,31);
+}
+
+vfloat32mf2_t
+test___riscv_vle32_v_f32mf2(float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32mf2(base,31);
+}
+
+vfloat32m1_t
+test___riscv_vle32_v_f32m1(float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m1(base,31);
+}
+
+vfloat32m2_t
+test___riscv_vle32_v_f32m2(float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m2(base,31);
+}
+
+vfloat32m4_t
+test___riscv_vle32_v_f32m4(float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m4(base,31);
+}
+
+vfloat32m8_t
+test___riscv_vle32_v_f32m8(float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m8(base,31);
+}
+vint64m1_t
+test___riscv_vle64_v_i64m1(int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m1(base,31);
+}
+
+vint64m2_t
+test___riscv_vle64_v_i64m2(int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m2(base,31);
+}
+
+vint64m4_t
+test___riscv_vle64_v_i64m4(int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m4(base,31);
+}
+
+vint64m8_t
+test___riscv_vle64_v_i64m8(int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m8(base,31);
+}
+
+vuint64m1_t
+test___riscv_vle64_v_u64m1(uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m1(base,31);
+}
+
+vuint64m2_t
+test___riscv_vle64_v_u64m2(uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m2(base,31);
+}
+
+vuint64m4_t
+test___riscv_vle64_v_u64m4(uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m4(base,31);
+}
+
+vuint64m8_t
+test___riscv_vle64_v_u64m8(uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m8(base,31);
+}
+
+vfloat64m1_t
+test___riscv_vle64_v_f64m1(double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m1(base,31);
+}
+
+vfloat64m2_t
+test___riscv_vle64_v_f64m2(double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m2(base,31);
+}
+
+vfloat64m4_t
+test___riscv_vle64_v_f64m4(double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m4(base,31);
+}
+
+vfloat64m8_t
+test___riscv_vle64_v_f64m8(double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m8(base,31);
+}
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vle-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vle-3.c
new file mode 100644
index 00000000000..ffc9fd5bcbe
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vle-3.c
@@ -0,0 +1,345 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t 
+test___riscv_vle8_v_i8mf8(int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf8(base,32);
+}
+
+vint8mf4_t 
+test___riscv_vle8_v_i8mf4(int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf4(base,32);
+}
+
+vint8mf2_t 
+test___riscv_vle8_v_i8mf2(int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf2(base,32);
+}
+
+vint8m1_t 
+test___riscv_vle8_v_i8m1(int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m1(base,32);
+}
+
+vint8m2_t 
+test___riscv_vle8_v_i8m2(int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m2(base,32);
+}
+
+vint8m4_t 
+test___riscv_vle8_v_i8m4(int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m4(base,32);
+}
+
+vint8m8_t 
+test___riscv_vle8_v_i8m8(int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m8(base,32);
+}
+
+vuint8mf8_t 
+test___riscv_vle8_v_u8mf8(uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf8(base,32);
+}
+
+vuint8mf4_t 
+test___riscv_vle8_v_u8mf4(uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf4(base,32);
+}
+
+vuint8mf2_t 
+test___riscv_vle8_v_u8mf2(uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf2(base,32);
+}
+
+vuint8m1_t 
+test___riscv_vle8_v_u8m1(uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m1(base,32);
+}
+
+vuint8m2_t 
+test___riscv_vle8_v_u8m2(uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m2(base,32);
+}
+
+vuint8m4_t 
+test___riscv_vle8_v_u8m4(uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m4(base,32);
+}
+
+vuint8m8_t 
+test___riscv_vle8_v_u8m8(uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m8(base,32);
+}
+
+vint16mf4_t
+test___riscv_vle16_v_i16mf4(int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf4(base,32);
+}
+
+vint16mf2_t
+test___riscv_vle16_v_i16mf2(int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf2(base,32);
+}
+
+vint16m1_t
+test___riscv_vle16_v_i16m1(int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m1(base,32);
+}
+
+vint16m2_t
+test___riscv_vle16_v_i16m2(int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m2(base,32);
+}
+
+vint16m4_t
+test___riscv_vle16_v_i16m4(int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m4(base,32);
+}
+
+vint16m8_t
+test___riscv_vle16_v_i16m8(int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m8(base,32);
+}
+
+vuint16mf4_t
+test___riscv_vle16_v_u16mf4(uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf4(base,32);
+}
+
+vuint16mf2_t
+test___riscv_vle16_v_u16mf2(uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf2(base,32);
+}
+
+vuint16m1_t
+test___riscv_vle16_v_u16m1(uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m1(base,32);
+}
+
+vuint16m2_t
+test___riscv_vle16_v_u16m2(uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m2(base,32);
+}
+
+vuint16m4_t
+test___riscv_vle16_v_u16m4(uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m4(base,32);
+}
+
+vuint16m8_t
+test___riscv_vle16_v_u16m8(uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m8(base,32);
+}
+
+vint32mf2_t
+test___riscv_vle32_v_i32mf2(int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32mf2(base,32);
+}
+
+vint32m1_t
+test___riscv_vle32_v_i32m1(int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m1(base,32);
+}
+
+vint32m2_t
+test___riscv_vle32_v_i32m2(int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m2(base,32);
+}
+
+vint32m4_t
+test___riscv_vle32_v_i32m4(int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m4(base,32);
+}
+
+vint32m8_t
+test___riscv_vle32_v_i32m8(int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m8(base,32);
+}
+
+vuint32mf2_t
+test___riscv_vle32_v_u32mf2(uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32mf2(base,32);
+}
+
+vuint32m1_t
+test___riscv_vle32_v_u32m1(uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m1(base,32);
+}
+
+vuint32m2_t
+test___riscv_vle32_v_u32m2(uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m2(base,32);
+}
+
+vuint32m4_t
+test___riscv_vle32_v_u32m4(uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m4(base,32);
+}
+
+vuint32m8_t
+test___riscv_vle32_v_u32m8(uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m8(base,32);
+}
+
+vfloat32mf2_t
+test___riscv_vle32_v_f32mf2(float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32mf2(base,32);
+}
+
+vfloat32m1_t
+test___riscv_vle32_v_f32m1(float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m1(base,32);
+}
+
+vfloat32m2_t
+test___riscv_vle32_v_f32m2(float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m2(base,32);
+}
+
+vfloat32m4_t
+test___riscv_vle32_v_f32m4(float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m4(base,32);
+}
+
+vfloat32m8_t
+test___riscv_vle32_v_f32m8(float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m8(base,32);
+}
+vint64m1_t
+test___riscv_vle64_v_i64m1(int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m1(base,32);
+}
+
+vint64m2_t
+test___riscv_vle64_v_i64m2(int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m2(base,32);
+}
+
+vint64m4_t
+test___riscv_vle64_v_i64m4(int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m4(base,32);
+}
+
+vint64m8_t
+test___riscv_vle64_v_i64m8(int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m8(base,32);
+}
+
+vuint64m1_t
+test___riscv_vle64_v_u64m1(uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m1(base,32);
+}
+
+vuint64m2_t
+test___riscv_vle64_v_u64m2(uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m2(base,32);
+}
+
+vuint64m4_t
+test___riscv_vle64_v_u64m4(uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m4(base,32);
+}
+
+vuint64m8_t
+test___riscv_vle64_v_u64m8(uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m8(base,32);
+}
+
+vfloat64m1_t
+test___riscv_vle64_v_f64m1(double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m1(base,32);
+}
+
+vfloat64m2_t
+test___riscv_vle64_v_f64m2(double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m2(base,32);
+}
+
+vfloat64m4_t
+test___riscv_vle64_v_f64m4(double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m4(base,32);
+}
+
+vfloat64m8_t
+test___riscv_vle64_v_f64m8(double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m8(base,32);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vle_m-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_m-1.c
new file mode 100644
index 00000000000..7944f5ef8f5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_m-1.c
@@ -0,0 +1,345 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t
+test___riscv_vle8_v_i8mf8_m(vbool64_t mask,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf8_m(mask,base,vl);
+}
+
+vint8mf4_t
+test___riscv_vle8_v_i8mf4_m(vbool32_t mask,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf4_m(mask,base,vl);
+}
+
+vint8mf2_t
+test___riscv_vle8_v_i8mf2_m(vbool16_t mask,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf2_m(mask,base,vl);
+}
+
+vint8m1_t
+test___riscv_vle8_v_i8m1_m(vbool8_t mask,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m1_m(mask,base,vl);
+}
+
+vint8m2_t
+test___riscv_vle8_v_i8m2_m(vbool4_t mask,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m2_m(mask,base,vl);
+}
+
+vint8m4_t
+test___riscv_vle8_v_i8m4_m(vbool2_t mask,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m4_m(mask,base,vl);
+}
+
+vint8m8_t
+test___riscv_vle8_v_i8m8_m(vbool1_t mask,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m8_m(mask,base,vl);
+}
+
+vuint8mf8_t
+test___riscv_vle8_v_u8mf8_m(vbool64_t mask,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf8_m(mask,base,vl);
+}
+
+vuint8mf4_t
+test___riscv_vle8_v_u8mf4_m(vbool32_t mask,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf4_m(mask,base,vl);
+}
+
+vuint8mf2_t
+test___riscv_vle8_v_u8mf2_m(vbool16_t mask,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf2_m(mask,base,vl);
+}
+
+vuint8m1_t
+test___riscv_vle8_v_u8m1_m(vbool8_t mask,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m1_m(mask,base,vl);
+}
+
+vuint8m2_t
+test___riscv_vle8_v_u8m2_m(vbool4_t mask,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m2_m(mask,base,vl);
+}
+
+vuint8m4_t
+test___riscv_vle8_v_u8m4_m(vbool2_t mask,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m4_m(mask,base,vl);
+}
+
+vuint8m8_t
+test___riscv_vle8_v_u8m8_m(vbool1_t mask,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m8_m(mask,base,vl);
+}
+
+vint16mf4_t
+test___riscv_vle16_v_i16mf4_m(vbool64_t mask,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf4_m(mask,base,vl);
+}
+
+vint16mf2_t
+test___riscv_vle16_v_i16mf2_m(vbool32_t mask,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf2_m(mask,base,vl);
+}
+
+vint16m1_t
+test___riscv_vle16_v_i16m1_m(vbool16_t mask,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m1_m(mask,base,vl);
+}
+
+vint16m2_t
+test___riscv_vle16_v_i16m2_m(vbool8_t mask,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m2_m(mask,base,vl);
+}
+
+vint16m4_t
+test___riscv_vle16_v_i16m4_m(vbool4_t mask,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m4_m(mask,base,vl);
+}
+
+vint16m8_t
+test___riscv_vle16_v_i16m8_m(vbool2_t mask,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m8_m(mask,base,vl);
+}
+
+vuint16mf4_t
+test___riscv_vle16_v_u16mf4_m(vbool64_t mask,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf4_m(mask,base,vl);
+}
+
+vuint16mf2_t
+test___riscv_vle16_v_u16mf2_m(vbool32_t mask,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf2_m(mask,base,vl);
+}
+
+vuint16m1_t
+test___riscv_vle16_v_u16m1_m(vbool16_t mask,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m1_m(mask,base,vl);
+}
+
+vuint16m2_t
+test___riscv_vle16_v_u16m2_m(vbool8_t mask,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m2_m(mask,base,vl);
+}
+
+vuint16m4_t
+test___riscv_vle16_v_u16m4_m(vbool4_t mask,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m4_m(mask,base,vl);
+}
+
+vuint16m8_t
+test___riscv_vle16_v_u16m8_m(vbool2_t mask,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m8_m(mask,base,vl);
+}
+
+vint32mf2_t
+test___riscv_vle32_v_i32mf2_m(vbool64_t mask,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32mf2_m(mask,base,vl);
+}
+
+vint32m1_t
+test___riscv_vle32_v_i32m1_m(vbool32_t mask,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m1_m(mask,base,vl);
+}
+
+vint32m2_t
+test___riscv_vle32_v_i32m2_m(vbool16_t mask,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m2_m(mask,base,vl);
+}
+
+vint32m4_t
+test___riscv_vle32_v_i32m4_m(vbool8_t mask,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m4_m(mask,base,vl);
+}
+
+vint32m8_t
+test___riscv_vle32_v_i32m8_m(vbool4_t mask,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m8_m(mask,base,vl);
+}
+
+vuint32mf2_t
+test___riscv_vle32_v_u32mf2_m(vbool64_t mask,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32mf2_m(mask,base,vl);
+}
+
+vuint32m1_t
+test___riscv_vle32_v_u32m1_m(vbool32_t mask,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m1_m(mask,base,vl);
+}
+
+vuint32m2_t
+test___riscv_vle32_v_u32m2_m(vbool16_t mask,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m2_m(mask,base,vl);
+}
+
+vuint32m4_t
+test___riscv_vle32_v_u32m4_m(vbool8_t mask,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m4_m(mask,base,vl);
+}
+
+vuint32m8_t
+test___riscv_vle32_v_u32m8_m(vbool4_t mask,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m8_m(mask,base,vl);
+}
+
+vfloat32mf2_t
+test___riscv_vle32_v_f32mf2_m(vbool64_t mask,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32mf2_m(mask,base,vl);
+}
+
+vfloat32m1_t
+test___riscv_vle32_v_f32m1_m(vbool32_t mask,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m1_m(mask,base,vl);
+}
+
+vfloat32m2_t
+test___riscv_vle32_v_f32m2_m(vbool16_t mask,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m2_m(mask,base,vl);
+}
+
+vfloat32m4_t
+test___riscv_vle32_v_f32m4_m(vbool8_t mask,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m4_m(mask,base,vl);
+}
+
+vfloat32m8_t
+test___riscv_vle32_v_f32m8_m(vbool4_t mask,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m8_m(mask,base,vl);
+}
+
+vint64m1_t
+test___riscv_vle64_v_i64m1_m(vbool64_t mask,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m1_m(mask,base,vl);
+}
+
+vint64m2_t
+test___riscv_vle64_v_i64m2_m(vbool32_t mask,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m2_m(mask,base,vl);
+}
+
+vint64m4_t
+test___riscv_vle64_v_i64m4_m(vbool16_t mask,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m4_m(mask,base,vl);
+}
+
+vint64m8_t
+test___riscv_vle64_v_i64m8_m(vbool8_t mask,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m8_m(mask,base,vl);
+}
+
+vuint64m1_t
+test___riscv_vle64_v_u64m1_m(vbool64_t mask,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m1_m(mask,base,vl);
+}
+
+vuint64m2_t
+test___riscv_vle64_v_u64m2_m(vbool32_t mask,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m2_m(mask,base,vl);
+}
+
+vuint64m4_t
+test___riscv_vle64_v_u64m4_m(vbool16_t mask,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m4_m(mask,base,vl);
+}
+
+vuint64m8_t
+test___riscv_vle64_v_u64m8_m(vbool8_t mask,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m8_m(mask,base,vl);
+}
+
+vfloat64m1_t
+test___riscv_vle64_v_f64m1_m(vbool64_t mask,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m1_m(mask,base,vl);
+}
+
+vfloat64m2_t
+test___riscv_vle64_v_f64m2_m(vbool32_t mask,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m2_m(mask,base,vl);
+}
+
+vfloat64m4_t
+test___riscv_vle64_v_f64m4_m(vbool16_t mask,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m4_m(mask,base,vl);
+}
+
+vfloat64m8_t
+test___riscv_vle64_v_f64m8_m(vbool8_t mask,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m8_m(mask,base,vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vle_m-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_m-2.c
new file mode 100644
index 00000000000..7d5f6016afb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_m-2.c
@@ -0,0 +1,345 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t
+test___riscv_vle8_v_i8mf8_m(vbool64_t mask,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf8_m(mask,base,31);
+}
+
+vint8mf4_t
+test___riscv_vle8_v_i8mf4_m(vbool32_t mask,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf4_m(mask,base,31);
+}
+
+vint8mf2_t
+test___riscv_vle8_v_i8mf2_m(vbool16_t mask,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf2_m(mask,base,31);
+}
+
+vint8m1_t
+test___riscv_vle8_v_i8m1_m(vbool8_t mask,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m1_m(mask,base,31);
+}
+
+vint8m2_t
+test___riscv_vle8_v_i8m2_m(vbool4_t mask,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m2_m(mask,base,31);
+}
+
+vint8m4_t
+test___riscv_vle8_v_i8m4_m(vbool2_t mask,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m4_m(mask,base,31);
+}
+
+vint8m8_t
+test___riscv_vle8_v_i8m8_m(vbool1_t mask,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m8_m(mask,base,31);
+}
+
+vuint8mf8_t
+test___riscv_vle8_v_u8mf8_m(vbool64_t mask,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf8_m(mask,base,31);
+}
+
+vuint8mf4_t
+test___riscv_vle8_v_u8mf4_m(vbool32_t mask,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf4_m(mask,base,31);
+}
+
+vuint8mf2_t
+test___riscv_vle8_v_u8mf2_m(vbool16_t mask,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf2_m(mask,base,31);
+}
+
+vuint8m1_t
+test___riscv_vle8_v_u8m1_m(vbool8_t mask,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m1_m(mask,base,31);
+}
+
+vuint8m2_t
+test___riscv_vle8_v_u8m2_m(vbool4_t mask,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m2_m(mask,base,31);
+}
+
+vuint8m4_t
+test___riscv_vle8_v_u8m4_m(vbool2_t mask,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m4_m(mask,base,31);
+}
+
+vuint8m8_t
+test___riscv_vle8_v_u8m8_m(vbool1_t mask,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m8_m(mask,base,31);
+}
+
+vint16mf4_t
+test___riscv_vle16_v_i16mf4_m(vbool64_t mask,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf4_m(mask,base,31);
+}
+
+vint16mf2_t
+test___riscv_vle16_v_i16mf2_m(vbool32_t mask,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf2_m(mask,base,31);
+}
+
+vint16m1_t
+test___riscv_vle16_v_i16m1_m(vbool16_t mask,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m1_m(mask,base,31);
+}
+
+vint16m2_t
+test___riscv_vle16_v_i16m2_m(vbool8_t mask,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m2_m(mask,base,31);
+}
+
+vint16m4_t
+test___riscv_vle16_v_i16m4_m(vbool4_t mask,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m4_m(mask,base,31);
+}
+
+vint16m8_t
+test___riscv_vle16_v_i16m8_m(vbool2_t mask,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m8_m(mask,base,31);
+}
+
+vuint16mf4_t
+test___riscv_vle16_v_u16mf4_m(vbool64_t mask,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf4_m(mask,base,31);
+}
+
+vuint16mf2_t
+test___riscv_vle16_v_u16mf2_m(vbool32_t mask,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf2_m(mask,base,31);
+}
+
+vuint16m1_t
+test___riscv_vle16_v_u16m1_m(vbool16_t mask,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m1_m(mask,base,31);
+}
+
+vuint16m2_t
+test___riscv_vle16_v_u16m2_m(vbool8_t mask,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m2_m(mask,base,31);
+}
+
+vuint16m4_t
+test___riscv_vle16_v_u16m4_m(vbool4_t mask,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m4_m(mask,base,31);
+}
+
+vuint16m8_t
+test___riscv_vle16_v_u16m8_m(vbool2_t mask,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m8_m(mask,base,31);
+}
+
+vint32mf2_t
+test___riscv_vle32_v_i32mf2_m(vbool64_t mask,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32mf2_m(mask,base,31);
+}
+
+vint32m1_t
+test___riscv_vle32_v_i32m1_m(vbool32_t mask,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m1_m(mask,base,31);
+}
+
+vint32m2_t
+test___riscv_vle32_v_i32m2_m(vbool16_t mask,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m2_m(mask,base,31);
+}
+
+vint32m4_t
+test___riscv_vle32_v_i32m4_m(vbool8_t mask,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m4_m(mask,base,31);
+}
+
+vint32m8_t
+test___riscv_vle32_v_i32m8_m(vbool4_t mask,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m8_m(mask,base,31);
+}
+
+vuint32mf2_t
+test___riscv_vle32_v_u32mf2_m(vbool64_t mask,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32mf2_m(mask,base,31);
+}
+
+vuint32m1_t
+test___riscv_vle32_v_u32m1_m(vbool32_t mask,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m1_m(mask,base,31);
+}
+
+vuint32m2_t
+test___riscv_vle32_v_u32m2_m(vbool16_t mask,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m2_m(mask,base,31);
+}
+
+vuint32m4_t
+test___riscv_vle32_v_u32m4_m(vbool8_t mask,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m4_m(mask,base,31);
+}
+
+vuint32m8_t
+test___riscv_vle32_v_u32m8_m(vbool4_t mask,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m8_m(mask,base,31);
+}
+
+vfloat32mf2_t
+test___riscv_vle32_v_f32mf2_m(vbool64_t mask,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32mf2_m(mask,base,31);
+}
+
+vfloat32m1_t
+test___riscv_vle32_v_f32m1_m(vbool32_t mask,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m1_m(mask,base,31);
+}
+
+vfloat32m2_t
+test___riscv_vle32_v_f32m2_m(vbool16_t mask,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m2_m(mask,base,31);
+}
+
+vfloat32m4_t
+test___riscv_vle32_v_f32m4_m(vbool8_t mask,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m4_m(mask,base,31);
+}
+
+vfloat32m8_t
+test___riscv_vle32_v_f32m8_m(vbool4_t mask,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m8_m(mask,base,31);
+}
+
+vint64m1_t
+test___riscv_vle64_v_i64m1_m(vbool64_t mask,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m1_m(mask,base,31);
+}
+
+vint64m2_t
+test___riscv_vle64_v_i64m2_m(vbool32_t mask,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m2_m(mask,base,31);
+}
+
+vint64m4_t
+test___riscv_vle64_v_i64m4_m(vbool16_t mask,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m4_m(mask,base,31);
+}
+
+vint64m8_t
+test___riscv_vle64_v_i64m8_m(vbool8_t mask,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m8_m(mask,base,31);
+}
+
+vuint64m1_t
+test___riscv_vle64_v_u64m1_m(vbool64_t mask,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m1_m(mask,base,31);
+}
+
+vuint64m2_t
+test___riscv_vle64_v_u64m2_m(vbool32_t mask,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m2_m(mask,base,31);
+}
+
+vuint64m4_t
+test___riscv_vle64_v_u64m4_m(vbool16_t mask,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m4_m(mask,base,31);
+}
+
+vuint64m8_t
+test___riscv_vle64_v_u64m8_m(vbool8_t mask,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m8_m(mask,base,31);
+}
+
+vfloat64m1_t
+test___riscv_vle64_v_f64m1_m(vbool64_t mask,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m1_m(mask,base,31);
+}
+
+vfloat64m2_t
+test___riscv_vle64_v_f64m2_m(vbool32_t mask,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m2_m(mask,base,31);
+}
+
+vfloat64m4_t
+test___riscv_vle64_v_f64m4_m(vbool16_t mask,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m4_m(mask,base,31);
+}
+
+vfloat64m8_t
+test___riscv_vle64_v_f64m8_m(vbool8_t mask,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m8_m(mask,base,31);
+}
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vle_m-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_m-3.c
new file mode 100644
index 00000000000..a3aca9f7815
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_m-3.c
@@ -0,0 +1,345 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t
+test___riscv_vle8_v_i8mf8_m(vbool64_t mask,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf8_m(mask,base,32);
+}
+
+vint8mf4_t
+test___riscv_vle8_v_i8mf4_m(vbool32_t mask,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf4_m(mask,base,32);
+}
+
+vint8mf2_t
+test___riscv_vle8_v_i8mf2_m(vbool16_t mask,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf2_m(mask,base,32);
+}
+
+vint8m1_t
+test___riscv_vle8_v_i8m1_m(vbool8_t mask,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m1_m(mask,base,32);
+}
+
+vint8m2_t
+test___riscv_vle8_v_i8m2_m(vbool4_t mask,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m2_m(mask,base,32);
+}
+
+vint8m4_t
+test___riscv_vle8_v_i8m4_m(vbool2_t mask,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m4_m(mask,base,32);
+}
+
+vint8m8_t
+test___riscv_vle8_v_i8m8_m(vbool1_t mask,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m8_m(mask,base,32);
+}
+
+vuint8mf8_t
+test___riscv_vle8_v_u8mf8_m(vbool64_t mask,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf8_m(mask,base,32);
+}
+
+vuint8mf4_t
+test___riscv_vle8_v_u8mf4_m(vbool32_t mask,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf4_m(mask,base,32);
+}
+
+vuint8mf2_t
+test___riscv_vle8_v_u8mf2_m(vbool16_t mask,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf2_m(mask,base,32);
+}
+
+vuint8m1_t
+test___riscv_vle8_v_u8m1_m(vbool8_t mask,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m1_m(mask,base,32);
+}
+
+vuint8m2_t
+test___riscv_vle8_v_u8m2_m(vbool4_t mask,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m2_m(mask,base,32);
+}
+
+vuint8m4_t
+test___riscv_vle8_v_u8m4_m(vbool2_t mask,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m4_m(mask,base,32);
+}
+
+vuint8m8_t
+test___riscv_vle8_v_u8m8_m(vbool1_t mask,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m8_m(mask,base,32);
+}
+
+vint16mf4_t
+test___riscv_vle16_v_i16mf4_m(vbool64_t mask,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf4_m(mask,base,32);
+}
+
+vint16mf2_t
+test___riscv_vle16_v_i16mf2_m(vbool32_t mask,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf2_m(mask,base,32);
+}
+
+vint16m1_t
+test___riscv_vle16_v_i16m1_m(vbool16_t mask,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m1_m(mask,base,32);
+}
+
+vint16m2_t
+test___riscv_vle16_v_i16m2_m(vbool8_t mask,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m2_m(mask,base,32);
+}
+
+vint16m4_t
+test___riscv_vle16_v_i16m4_m(vbool4_t mask,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m4_m(mask,base,32);
+}
+
+vint16m8_t
+test___riscv_vle16_v_i16m8_m(vbool2_t mask,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m8_m(mask,base,32);
+}
+
+vuint16mf4_t
+test___riscv_vle16_v_u16mf4_m(vbool64_t mask,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf4_m(mask,base,32);
+}
+
+vuint16mf2_t
+test___riscv_vle16_v_u16mf2_m(vbool32_t mask,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf2_m(mask,base,32);
+}
+
+vuint16m1_t
+test___riscv_vle16_v_u16m1_m(vbool16_t mask,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m1_m(mask,base,32);
+}
+
+vuint16m2_t
+test___riscv_vle16_v_u16m2_m(vbool8_t mask,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m2_m(mask,base,32);
+}
+
+vuint16m4_t
+test___riscv_vle16_v_u16m4_m(vbool4_t mask,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m4_m(mask,base,32);
+}
+
+vuint16m8_t
+test___riscv_vle16_v_u16m8_m(vbool2_t mask,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m8_m(mask,base,32);
+}
+
+vint32mf2_t
+test___riscv_vle32_v_i32mf2_m(vbool64_t mask,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32mf2_m(mask,base,32);
+}
+
+vint32m1_t
+test___riscv_vle32_v_i32m1_m(vbool32_t mask,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m1_m(mask,base,32);
+}
+
+vint32m2_t
+test___riscv_vle32_v_i32m2_m(vbool16_t mask,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m2_m(mask,base,32);
+}
+
+vint32m4_t
+test___riscv_vle32_v_i32m4_m(vbool8_t mask,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m4_m(mask,base,32);
+}
+
+vint32m8_t
+test___riscv_vle32_v_i32m8_m(vbool4_t mask,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m8_m(mask,base,32);
+}
+
+vuint32mf2_t
+test___riscv_vle32_v_u32mf2_m(vbool64_t mask,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32mf2_m(mask,base,32);
+}
+
+vuint32m1_t
+test___riscv_vle32_v_u32m1_m(vbool32_t mask,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m1_m(mask,base,32);
+}
+
+vuint32m2_t
+test___riscv_vle32_v_u32m2_m(vbool16_t mask,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m2_m(mask,base,32);
+}
+
+vuint32m4_t
+test___riscv_vle32_v_u32m4_m(vbool8_t mask,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m4_m(mask,base,32);
+}
+
+vuint32m8_t
+test___riscv_vle32_v_u32m8_m(vbool4_t mask,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m8_m(mask,base,32);
+}
+
+vfloat32mf2_t
+test___riscv_vle32_v_f32mf2_m(vbool64_t mask,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32mf2_m(mask,base,32);
+}
+
+vfloat32m1_t
+test___riscv_vle32_v_f32m1_m(vbool32_t mask,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m1_m(mask,base,32);
+}
+
+vfloat32m2_t
+test___riscv_vle32_v_f32m2_m(vbool16_t mask,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m2_m(mask,base,32);
+}
+
+vfloat32m4_t
+test___riscv_vle32_v_f32m4_m(vbool8_t mask,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m4_m(mask,base,32);
+}
+
+vfloat32m8_t
+test___riscv_vle32_v_f32m8_m(vbool4_t mask,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m8_m(mask,base,32);
+}
+
+vint64m1_t
+test___riscv_vle64_v_i64m1_m(vbool64_t mask,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m1_m(mask,base,32);
+}
+
+vint64m2_t
+test___riscv_vle64_v_i64m2_m(vbool32_t mask,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m2_m(mask,base,32);
+}
+
+vint64m4_t
+test___riscv_vle64_v_i64m4_m(vbool16_t mask,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m4_m(mask,base,32);
+}
+
+vint64m8_t
+test___riscv_vle64_v_i64m8_m(vbool8_t mask,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m8_m(mask,base,32);
+}
+
+vuint64m1_t
+test___riscv_vle64_v_u64m1_m(vbool64_t mask,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m1_m(mask,base,32);
+}
+
+vuint64m2_t
+test___riscv_vle64_v_u64m2_m(vbool32_t mask,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m2_m(mask,base,32);
+}
+
+vuint64m4_t
+test___riscv_vle64_v_u64m4_m(vbool16_t mask,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m4_m(mask,base,32);
+}
+
+vuint64m8_t
+test___riscv_vle64_v_u64m8_m(vbool8_t mask,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m8_m(mask,base,32);
+}
+
+vfloat64m1_t
+test___riscv_vle64_v_f64m1_m(vbool64_t mask,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m1_m(mask,base,32);
+}
+
+vfloat64m2_t
+test___riscv_vle64_v_f64m2_m(vbool32_t mask,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m2_m(mask,base,32);
+}
+
+vfloat64m4_t
+test___riscv_vle64_v_f64m4_m(vbool16_t mask,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m4_m(mask,base,32);
+}
+
+vfloat64m8_t
+test___riscv_vle64_v_f64m8_m(vbool8_t mask,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m8_m(mask,base,32);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vle_mu-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_mu-1.c
new file mode 100644
index 00000000000..25449911625
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_mu-1.c
@@ -0,0 +1,344 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
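+/* Note: the _mu intrinsics request the mask-undisturbed policy, so inactive
+   elements keep the value of the merge operand.  The patterns below
+   therefore require a literal "mu" in the vsetvli policy field and a "v0.t"
+   mask operand on each load.  */
+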
+vint8mf8_t
+test___riscv_vle8_v_i8mf8_mu(vbool64_t mask,vint8mf8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf8_mu(mask,merge,base,vl);
+}
+
+vint8mf4_t
+test___riscv_vle8_v_i8mf4_mu(vbool32_t mask,vint8mf4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf4_mu(mask,merge,base,vl);
+}
+
+vint8mf2_t
+test___riscv_vle8_v_i8mf2_mu(vbool16_t mask,vint8mf2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf2_mu(mask,merge,base,vl);
+}
+
+vint8m1_t
+test___riscv_vle8_v_i8m1_mu(vbool8_t mask,vint8m1_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m1_mu(mask,merge,base,vl);
+}
+
+vint8m2_t
+test___riscv_vle8_v_i8m2_mu(vbool4_t mask,vint8m2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m2_mu(mask,merge,base,vl);
+}
+
+vint8m4_t
+test___riscv_vle8_v_i8m4_mu(vbool2_t mask,vint8m4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m4_mu(mask,merge,base,vl);
+}
+
+vint8m8_t
+test___riscv_vle8_v_i8m8_mu(vbool1_t mask,vint8m8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m8_mu(mask,merge,base,vl);
+}
+
+vuint8mf8_t
+test___riscv_vle8_v_u8mf8_mu(vbool64_t mask,vuint8mf8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf8_mu(mask,merge,base,vl);
+}
+
+vuint8mf4_t
+test___riscv_vle8_v_u8mf4_mu(vbool32_t mask,vuint8mf4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf4_mu(mask,merge,base,vl);
+}
+
+vuint8mf2_t
+test___riscv_vle8_v_u8mf2_mu(vbool16_t mask,vuint8mf2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf2_mu(mask,merge,base,vl);
+}
+
+vuint8m1_t
+test___riscv_vle8_v_u8m1_mu(vbool8_t mask,vuint8m1_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m1_mu(mask,merge,base,vl);
+}
+
+vuint8m2_t
+test___riscv_vle8_v_u8m2_mu(vbool4_t mask,vuint8m2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m2_mu(mask,merge,base,vl);
+}
+
+vuint8m4_t
+test___riscv_vle8_v_u8m4_mu(vbool2_t mask,vuint8m4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m4_mu(mask,merge,base,vl);
+}
+
+vuint8m8_t
+test___riscv_vle8_v_u8m8_mu(vbool1_t mask,vuint8m8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m8_mu(mask,merge,base,vl);
+}
+
+vint16mf4_t
+test___riscv_vle16_v_i16mf4_mu(vbool64_t mask,vint16mf4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf4_mu(mask,merge,base,vl);
+}
+
+vint16mf2_t
+test___riscv_vle16_v_i16mf2_mu(vbool32_t mask,vint16mf2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf2_mu(mask,merge,base,vl);
+}
+
+vint16m1_t
+test___riscv_vle16_v_i16m1_mu(vbool16_t mask,vint16m1_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m1_mu(mask,merge,base,vl);
+}
+
+vint16m2_t
+test___riscv_vle16_v_i16m2_mu(vbool8_t mask,vint16m2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m2_mu(mask,merge,base,vl);
+}
+
+vint16m4_t
+test___riscv_vle16_v_i16m4_mu(vbool4_t mask,vint16m4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m4_mu(mask,merge,base,vl);
+}
+
+vint16m8_t
+test___riscv_vle16_v_i16m8_mu(vbool2_t mask,vint16m8_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m8_mu(mask,merge,base,vl);
+}
+
+vuint16mf4_t
+test___riscv_vle16_v_u16mf4_mu(vbool64_t mask,vuint16mf4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf4_mu(mask,merge,base,vl);
+}
+
+vuint16mf2_t
+test___riscv_vle16_v_u16mf2_mu(vbool32_t mask,vuint16mf2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf2_mu(mask,merge,base,vl);
+}
+
+vuint16m1_t
+test___riscv_vle16_v_u16m1_mu(vbool16_t mask,vuint16m1_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m1_mu(mask,merge,base,vl);
+}
+
+vuint16m2_t
+test___riscv_vle16_v_u16m2_mu(vbool8_t mask,vuint16m2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m2_mu(mask,merge,base,vl);
+}
+
+vuint16m4_t
+test___riscv_vle16_v_u16m4_mu(vbool4_t mask,vuint16m4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m4_mu(mask,merge,base,vl);
+}
+
+vuint16m8_t
+test___riscv_vle16_v_u16m8_mu(vbool2_t mask,vuint16m8_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m8_mu(mask,merge,base,vl);
+}
+
+vint32mf2_t
+test___riscv_vle32_v_i32mf2_mu(vbool64_t mask,vint32mf2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32mf2_mu(mask,merge,base,vl);
+}
+
+vint32m1_t
+test___riscv_vle32_v_i32m1_mu(vbool32_t mask,vint32m1_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m1_mu(mask,merge,base,vl);
+}
+
+vint32m2_t
+test___riscv_vle32_v_i32m2_mu(vbool16_t mask,vint32m2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m2_mu(mask,merge,base,vl);
+}
+
+vint32m4_t
+test___riscv_vle32_v_i32m4_mu(vbool8_t mask,vint32m4_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m4_mu(mask,merge,base,vl);
+}
+
+vint32m8_t
+test___riscv_vle32_v_i32m8_mu(vbool4_t mask,vint32m8_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m8_mu(mask,merge,base,vl);
+}
+
+vuint32mf2_t
+test___riscv_vle32_v_u32mf2_mu(vbool64_t mask,vuint32mf2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32mf2_mu(mask,merge,base,vl);
+}
+
+vuint32m1_t
+test___riscv_vle32_v_u32m1_mu(vbool32_t mask,vuint32m1_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m1_mu(mask,merge,base,vl);
+}
+
+vuint32m2_t
+test___riscv_vle32_v_u32m2_mu(vbool16_t mask,vuint32m2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m2_mu(mask,merge,base,vl);
+}
+
+vuint32m4_t
+test___riscv_vle32_v_u32m4_mu(vbool8_t mask,vuint32m4_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m4_mu(mask,merge,base,vl);
+}
+
+vuint32m8_t
+test___riscv_vle32_v_u32m8_mu(vbool4_t mask,vuint32m8_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m8_mu(mask,merge,base,vl);
+}
+
+vfloat32mf2_t
+test___riscv_vle32_v_f32mf2_mu(vbool64_t mask,vfloat32mf2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32mf2_mu(mask,merge,base,vl);
+}
+
+vfloat32m1_t
+test___riscv_vle32_v_f32m1_mu(vbool32_t mask,vfloat32m1_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m1_mu(mask,merge,base,vl);
+}
+
+vfloat32m2_t
+test___riscv_vle32_v_f32m2_mu(vbool16_t mask,vfloat32m2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m2_mu(mask,merge,base,vl);
+}
+
+vfloat32m4_t
+test___riscv_vle32_v_f32m4_mu(vbool8_t mask,vfloat32m4_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m4_mu(mask,merge,base,vl);
+}
+
+vfloat32m8_t
+test___riscv_vle32_v_f32m8_mu(vbool4_t mask,vfloat32m8_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m8_mu(mask,merge,base,vl);
+}
+
+vint64m1_t
+test___riscv_vle64_v_i64m1_mu(vbool64_t mask,vint64m1_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m1_mu(mask,merge,base,vl);
+}
+
+vint64m2_t
+test___riscv_vle64_v_i64m2_mu(vbool32_t mask,vint64m2_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m2_mu(mask,merge,base,vl);
+}
+
+vint64m4_t
+test___riscv_vle64_v_i64m4_mu(vbool16_t mask,vint64m4_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m4_mu(mask,merge,base,vl);
+}
+
+vint64m8_t
+test___riscv_vle64_v_i64m8_mu(vbool8_t mask,vint64m8_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m8_mu(mask,merge,base,vl);
+}
+
+vuint64m1_t
+test___riscv_vle64_v_u64m1_mu(vbool64_t mask,vuint64m1_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m1_mu(mask,merge,base,vl);
+}
+
+vuint64m2_t
+test___riscv_vle64_v_u64m2_mu(vbool32_t mask,vuint64m2_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m2_mu(mask,merge,base,vl);
+}
+
+vuint64m4_t
+test___riscv_vle64_v_u64m4_mu(vbool16_t mask,vuint64m4_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m4_mu(mask,merge,base,vl);
+}
+
+vuint64m8_t
+test___riscv_vle64_v_u64m8_mu(vbool8_t mask,vuint64m8_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m8_mu(mask,merge,base,vl);
+}
+
+vfloat64m1_t
+test___riscv_vle64_v_f64m1_mu(vbool64_t mask,vfloat64m1_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m1_mu(mask,merge,base,vl);
+}
+
+vfloat64m2_t
+test___riscv_vle64_v_f64m2_mu(vbool32_t mask,vfloat64m2_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m2_mu(mask,merge,base,vl);
+}
+
+vfloat64m4_t
+test___riscv_vle64_v_f64m4_mu(vbool16_t mask,vfloat64m4_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m4_mu(mask,merge,base,vl);
+}
+
+vfloat64m8_t
+test___riscv_vle64_v_f64m8_mu(vbool8_t mask,vfloat64m8_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m8_mu(mask,merge,base,vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vle_mu-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_mu-2.c
new file mode 100644
index 00000000000..9440cb1a405
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_mu-2.c
@@ -0,0 +1,344 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
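+/* Note: the same _mu tests as vle_mu-1.c, but with the constant 31 as VL.
+   31 is the largest value that fits in the 5-bit vsetivli AVL immediate, so
+   the patterns below expect vsetivli rather than vsetvli.  */
+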
+vint8mf8_t
+test___riscv_vle8_v_i8mf8_mu(vbool64_t mask,vint8mf8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf8_mu(mask,merge,base,31);
+}
+
+vint8mf4_t
+test___riscv_vle8_v_i8mf4_mu(vbool32_t mask,vint8mf4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf4_mu(mask,merge,base,31);
+}
+
+vint8mf2_t
+test___riscv_vle8_v_i8mf2_mu(vbool16_t mask,vint8mf2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf2_mu(mask,merge,base,31);
+}
+
+vint8m1_t
+test___riscv_vle8_v_i8m1_mu(vbool8_t mask,vint8m1_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m1_mu(mask,merge,base,31);
+}
+
+vint8m2_t
+test___riscv_vle8_v_i8m2_mu(vbool4_t mask,vint8m2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m2_mu(mask,merge,base,31);
+}
+
+vint8m4_t
+test___riscv_vle8_v_i8m4_mu(vbool2_t mask,vint8m4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m4_mu(mask,merge,base,31);
+}
+
+vint8m8_t
+test___riscv_vle8_v_i8m8_mu(vbool1_t mask,vint8m8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m8_mu(mask,merge,base,31);
+}
+
+vuint8mf8_t
+test___riscv_vle8_v_u8mf8_mu(vbool64_t mask,vuint8mf8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf8_mu(mask,merge,base,31);
+}
+
+vuint8mf4_t
+test___riscv_vle8_v_u8mf4_mu(vbool32_t mask,vuint8mf4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf4_mu(mask,merge,base,31);
+}
+
+vuint8mf2_t
+test___riscv_vle8_v_u8mf2_mu(vbool16_t mask,vuint8mf2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf2_mu(mask,merge,base,31);
+}
+
+vuint8m1_t
+test___riscv_vle8_v_u8m1_mu(vbool8_t mask,vuint8m1_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m1_mu(mask,merge,base,31);
+}
+
+vuint8m2_t
+test___riscv_vle8_v_u8m2_mu(vbool4_t mask,vuint8m2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m2_mu(mask,merge,base,31);
+}
+
+vuint8m4_t
+test___riscv_vle8_v_u8m4_mu(vbool2_t mask,vuint8m4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m4_mu(mask,merge,base,31);
+}
+
+vuint8m8_t
+test___riscv_vle8_v_u8m8_mu(vbool1_t mask,vuint8m8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m8_mu(mask,merge,base,31);
+}
+
+vint16mf4_t
+test___riscv_vle16_v_i16mf4_mu(vbool64_t mask,vint16mf4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf4_mu(mask,merge,base,31);
+}
+
+vint16mf2_t
+test___riscv_vle16_v_i16mf2_mu(vbool32_t mask,vint16mf2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf2_mu(mask,merge,base,31);
+}
+
+vint16m1_t
+test___riscv_vle16_v_i16m1_mu(vbool16_t mask,vint16m1_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m1_mu(mask,merge,base,31);
+}
+
+vint16m2_t
+test___riscv_vle16_v_i16m2_mu(vbool8_t mask,vint16m2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m2_mu(mask,merge,base,31);
+}
+
+vint16m4_t
+test___riscv_vle16_v_i16m4_mu(vbool4_t mask,vint16m4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m4_mu(mask,merge,base,31);
+}
+
+vint16m8_t
+test___riscv_vle16_v_i16m8_mu(vbool2_t mask,vint16m8_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m8_mu(mask,merge,base,31);
+}
+
+vuint16mf4_t
+test___riscv_vle16_v_u16mf4_mu(vbool64_t mask,vuint16mf4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf4_mu(mask,merge,base,31);
+}
+
+vuint16mf2_t
+test___riscv_vle16_v_u16mf2_mu(vbool32_t mask,vuint16mf2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf2_mu(mask,merge,base,31);
+}
+
+vuint16m1_t
+test___riscv_vle16_v_u16m1_mu(vbool16_t mask,vuint16m1_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m1_mu(mask,merge,base,31);
+}
+
+vuint16m2_t
+test___riscv_vle16_v_u16m2_mu(vbool8_t mask,vuint16m2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m2_mu(mask,merge,base,31);
+}
+
+vuint16m4_t
+test___riscv_vle16_v_u16m4_mu(vbool4_t mask,vuint16m4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m4_mu(mask,merge,base,31);
+}
+
+vuint16m8_t
+test___riscv_vle16_v_u16m8_mu(vbool2_t mask,vuint16m8_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m8_mu(mask,merge,base,31);
+}
+
+vint32mf2_t
+test___riscv_vle32_v_i32mf2_mu(vbool64_t mask,vint32mf2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32mf2_mu(mask,merge,base,31);
+}
+
+vint32m1_t
+test___riscv_vle32_v_i32m1_mu(vbool32_t mask,vint32m1_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m1_mu(mask,merge,base,31);
+}
+
+vint32m2_t
+test___riscv_vle32_v_i32m2_mu(vbool16_t mask,vint32m2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m2_mu(mask,merge,base,31);
+}
+
+vint32m4_t
+test___riscv_vle32_v_i32m4_mu(vbool8_t mask,vint32m4_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m4_mu(mask,merge,base,31);
+}
+
+vint32m8_t
+test___riscv_vle32_v_i32m8_mu(vbool4_t mask,vint32m8_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m8_mu(mask,merge,base,31);
+}
+
+vuint32mf2_t
+test___riscv_vle32_v_u32mf2_mu(vbool64_t mask,vuint32mf2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32mf2_mu(mask,merge,base,31);
+}
+
+vuint32m1_t
+test___riscv_vle32_v_u32m1_mu(vbool32_t mask,vuint32m1_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m1_mu(mask,merge,base,31);
+}
+
+vuint32m2_t
+test___riscv_vle32_v_u32m2_mu(vbool16_t mask,vuint32m2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m2_mu(mask,merge,base,31);
+}
+
+vuint32m4_t
+test___riscv_vle32_v_u32m4_mu(vbool8_t mask,vuint32m4_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m4_mu(mask,merge,base,31);
+}
+
+vuint32m8_t
+test___riscv_vle32_v_u32m8_mu(vbool4_t mask,vuint32m8_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m8_mu(mask,merge,base,31);
+}
+
+vfloat32mf2_t
+test___riscv_vle32_v_f32mf2_mu(vbool64_t mask,vfloat32mf2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32mf2_mu(mask,merge,base,31);
+}
+
+vfloat32m1_t
+test___riscv_vle32_v_f32m1_mu(vbool32_t mask,vfloat32m1_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m1_mu(mask,merge,base,31);
+}
+
+vfloat32m2_t
+test___riscv_vle32_v_f32m2_mu(vbool16_t mask,vfloat32m2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m2_mu(mask,merge,base,31);
+}
+
+vfloat32m4_t
+test___riscv_vle32_v_f32m4_mu(vbool8_t mask,vfloat32m4_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m4_mu(mask,merge,base,31);
+}
+
+vfloat32m8_t
+test___riscv_vle32_v_f32m8_mu(vbool4_t mask,vfloat32m8_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m8_mu(mask,merge,base,31);
+}
+
+vint64m1_t
+test___riscv_vle64_v_i64m1_mu(vbool64_t mask,vint64m1_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m1_mu(mask,merge,base,31);
+}
+
+vint64m2_t
+test___riscv_vle64_v_i64m2_mu(vbool32_t mask,vint64m2_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m2_mu(mask,merge,base,31);
+}
+
+vint64m4_t
+test___riscv_vle64_v_i64m4_mu(vbool16_t mask,vint64m4_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m4_mu(mask,merge,base,31);
+}
+
+vint64m8_t
+test___riscv_vle64_v_i64m8_mu(vbool8_t mask,vint64m8_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m8_mu(mask,merge,base,31);
+}
+
+vuint64m1_t
+test___riscv_vle64_v_u64m1_mu(vbool64_t mask,vuint64m1_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m1_mu(mask,merge,base,31);
+}
+
+vuint64m2_t
+test___riscv_vle64_v_u64m2_mu(vbool32_t mask,vuint64m2_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m2_mu(mask,merge,base,31);
+}
+
+vuint64m4_t
+test___riscv_vle64_v_u64m4_mu(vbool16_t mask,vuint64m4_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m4_mu(mask,merge,base,31);
+}
+
+vuint64m8_t
+test___riscv_vle64_v_u64m8_mu(vbool8_t mask,vuint64m8_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m8_mu(mask,merge,base,31);
+}
+
+vfloat64m1_t
+test___riscv_vle64_v_f64m1_mu(vbool64_t mask,vfloat64m1_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m1_mu(mask,merge,base,31);
+}
+
+vfloat64m2_t
+test___riscv_vle64_v_f64m2_mu(vbool32_t mask,vfloat64m2_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m2_mu(mask,merge,base,31);
+}
+
+vfloat64m4_t
+test___riscv_vle64_v_f64m4_mu(vbool16_t mask,vfloat64m4_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m4_mu(mask,merge,base,31);
+}
+
+vfloat64m8_t
+test___riscv_vle64_v_f64m8_mu(vbool8_t mask,vfloat64m8_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m8_mu(mask,merge,base,31);
+}
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*t[au],\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*t[au],\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*t[au],\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*t[au],\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vle_mu-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_mu-3.c
new file mode 100644
index 00000000000..f1eea5fa578
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_mu-3.c
@@ -0,0 +1,344 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
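+/* Note: the same _mu tests again, now with the constant 32 as VL.  32 no
+   longer fits in the 5-bit vsetivli immediate, so the compiler must fall
+   back to vsetvli with the value held in a register, as the patterns below
+   check.  */
+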
+vint8mf8_t
+test___riscv_vle8_v_i8mf8_mu(vbool64_t mask,vint8mf8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf8_mu(mask,merge,base,32);
+}
+
+vint8mf4_t
+test___riscv_vle8_v_i8mf4_mu(vbool32_t mask,vint8mf4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf4_mu(mask,merge,base,32);
+}
+
+vint8mf2_t
+test___riscv_vle8_v_i8mf2_mu(vbool16_t mask,vint8mf2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf2_mu(mask,merge,base,32);
+}
+
+vint8m1_t
+test___riscv_vle8_v_i8m1_mu(vbool8_t mask,vint8m1_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m1_mu(mask,merge,base,32);
+}
+
+vint8m2_t
+test___riscv_vle8_v_i8m2_mu(vbool4_t mask,vint8m2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m2_mu(mask,merge,base,32);
+}
+
+vint8m4_t
+test___riscv_vle8_v_i8m4_mu(vbool2_t mask,vint8m4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m4_mu(mask,merge,base,32);
+}
+
+vint8m8_t
+test___riscv_vle8_v_i8m8_mu(vbool1_t mask,vint8m8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m8_mu(mask,merge,base,32);
+}
+
+vuint8mf8_t
+test___riscv_vle8_v_u8mf8_mu(vbool64_t mask,vuint8mf8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf8_mu(mask,merge,base,32);
+}
+
+vuint8mf4_t
+test___riscv_vle8_v_u8mf4_mu(vbool32_t mask,vuint8mf4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf4_mu(mask,merge,base,32);
+}
+
+vuint8mf2_t
+test___riscv_vle8_v_u8mf2_mu(vbool16_t mask,vuint8mf2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf2_mu(mask,merge,base,32);
+}
+
+vuint8m1_t
+test___riscv_vle8_v_u8m1_mu(vbool8_t mask,vuint8m1_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m1_mu(mask,merge,base,32);
+}
+
+vuint8m2_t
+test___riscv_vle8_v_u8m2_mu(vbool4_t mask,vuint8m2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m2_mu(mask,merge,base,32);
+}
+
+vuint8m4_t
+test___riscv_vle8_v_u8m4_mu(vbool2_t mask,vuint8m4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m4_mu(mask,merge,base,32);
+}
+
+vuint8m8_t
+test___riscv_vle8_v_u8m8_mu(vbool1_t mask,vuint8m8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m8_mu(mask,merge,base,32);
+}
+
+vint16mf4_t
+test___riscv_vle16_v_i16mf4_mu(vbool64_t mask,vint16mf4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf4_mu(mask,merge,base,32);
+}
+
+vint16mf2_t
+test___riscv_vle16_v_i16mf2_mu(vbool32_t mask,vint16mf2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf2_mu(mask,merge,base,32);
+}
+
+vint16m1_t
+test___riscv_vle16_v_i16m1_mu(vbool16_t mask,vint16m1_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m1_mu(mask,merge,base,32);
+}
+
+vint16m2_t
+test___riscv_vle16_v_i16m2_mu(vbool8_t mask,vint16m2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m2_mu(mask,merge,base,32);
+}
+
+vint16m4_t
+test___riscv_vle16_v_i16m4_mu(vbool4_t mask,vint16m4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m4_mu(mask,merge,base,32);
+}
+
+vint16m8_t
+test___riscv_vle16_v_i16m8_mu(vbool2_t mask,vint16m8_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m8_mu(mask,merge,base,32);
+}
+
+vuint16mf4_t
+test___riscv_vle16_v_u16mf4_mu(vbool64_t mask,vuint16mf4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf4_mu(mask,merge,base,32);
+}
+
+vuint16mf2_t
+test___riscv_vle16_v_u16mf2_mu(vbool32_t mask,vuint16mf2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf2_mu(mask,merge,base,32);
+}
+
+vuint16m1_t
+test___riscv_vle16_v_u16m1_mu(vbool16_t mask,vuint16m1_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m1_mu(mask,merge,base,32);
+}
+
+vuint16m2_t
+test___riscv_vle16_v_u16m2_mu(vbool8_t mask,vuint16m2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m2_mu(mask,merge,base,32);
+}
+
+vuint16m4_t
+test___riscv_vle16_v_u16m4_mu(vbool4_t mask,vuint16m4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m4_mu(mask,merge,base,32);
+}
+
+vuint16m8_t
+test___riscv_vle16_v_u16m8_mu(vbool2_t mask,vuint16m8_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m8_mu(mask,merge,base,32);
+}
+
+vint32mf2_t
+test___riscv_vle32_v_i32mf2_mu(vbool64_t mask,vint32mf2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32mf2_mu(mask,merge,base,32);
+}
+
+vint32m1_t
+test___riscv_vle32_v_i32m1_mu(vbool32_t mask,vint32m1_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m1_mu(mask,merge,base,32);
+}
+
+vint32m2_t
+test___riscv_vle32_v_i32m2_mu(vbool16_t mask,vint32m2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m2_mu(mask,merge,base,32);
+}
+
+vint32m4_t
+test___riscv_vle32_v_i32m4_mu(vbool8_t mask,vint32m4_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m4_mu(mask,merge,base,32);
+}
+
+vint32m8_t
+test___riscv_vle32_v_i32m8_mu(vbool4_t mask,vint32m8_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m8_mu(mask,merge,base,32);
+}
+
+vuint32mf2_t
+test___riscv_vle32_v_u32mf2_mu(vbool64_t mask,vuint32mf2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32mf2_mu(mask,merge,base,32);
+}
+
+vuint32m1_t
+test___riscv_vle32_v_u32m1_mu(vbool32_t mask,vuint32m1_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m1_mu(mask,merge,base,32);
+}
+
+vuint32m2_t
+test___riscv_vle32_v_u32m2_mu(vbool16_t mask,vuint32m2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m2_mu(mask,merge,base,32);
+}
+
+vuint32m4_t
+test___riscv_vle32_v_u32m4_mu(vbool8_t mask,vuint32m4_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m4_mu(mask,merge,base,32);
+}
+
+vuint32m8_t
+test___riscv_vle32_v_u32m8_mu(vbool4_t mask,vuint32m8_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m8_mu(mask,merge,base,32);
+}
+
+vfloat32mf2_t
+test___riscv_vle32_v_f32mf2_mu(vbool64_t mask,vfloat32mf2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32mf2_mu(mask,merge,base,32);
+}
+
+vfloat32m1_t
+test___riscv_vle32_v_f32m1_mu(vbool32_t mask,vfloat32m1_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m1_mu(mask,merge,base,32);
+}
+
+vfloat32m2_t
+test___riscv_vle32_v_f32m2_mu(vbool16_t mask,vfloat32m2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m2_mu(mask,merge,base,32);
+}
+
+vfloat32m4_t
+test___riscv_vle32_v_f32m4_mu(vbool8_t mask,vfloat32m4_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m4_mu(mask,merge,base,32);
+}
+
+vfloat32m8_t
+test___riscv_vle32_v_f32m8_mu(vbool4_t mask,vfloat32m8_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m8_mu(mask,merge,base,32);
+}
+
+vint64m1_t
+test___riscv_vle64_v_i64m1_mu(vbool64_t mask,vint64m1_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m1_mu(mask,merge,base,32);
+}
+
+vint64m2_t
+test___riscv_vle64_v_i64m2_mu(vbool32_t mask,vint64m2_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m2_mu(mask,merge,base,32);
+}
+
+vint64m4_t
+test___riscv_vle64_v_i64m4_mu(vbool16_t mask,vint64m4_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m4_mu(mask,merge,base,32);
+}
+
+vint64m8_t
+test___riscv_vle64_v_i64m8_mu(vbool8_t mask,vint64m8_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m8_mu(mask,merge,base,32);
+}
+
+vuint64m1_t
+test___riscv_vle64_v_u64m1_mu(vbool64_t mask,vuint64m1_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m1_mu(mask,merge,base,32);
+}
+
+vuint64m2_t
+test___riscv_vle64_v_u64m2_mu(vbool32_t mask,vuint64m2_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m2_mu(mask,merge,base,32);
+}
+
+vuint64m4_t
+test___riscv_vle64_v_u64m4_mu(vbool16_t mask,vuint64m4_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m4_mu(mask,merge,base,32);
+}
+
+vuint64m8_t
+test___riscv_vle64_v_u64m8_mu(vbool8_t mask,vuint64m8_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m8_mu(mask,merge,base,32);
+}
+
+vfloat64m1_t
+test___riscv_vle64_v_f64m1_mu(vbool64_t mask,vfloat64m1_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m1_mu(mask,merge,base,32);
+}
+
+vfloat64m2_t
+test___riscv_vle64_v_f64m2_mu(vbool32_t mask,vfloat64m2_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m2_mu(mask,merge,base,32);
+}
+
+vfloat64m4_t
+test___riscv_vle64_v_f64m4_mu(vbool16_t mask,vfloat64m4_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m4_mu(mask,merge,base,32);
+}
+
+vfloat64m8_t
+test___riscv_vle64_v_f64m8_mu(vbool8_t mask,vfloat64m8_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m8_mu(mask,merge,base,32);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tu-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tu-1.c
new file mode 100644
index 00000000000..2f55f6ace71
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tu-1.c
@@ -0,0 +1,345 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
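+/* Note: the _tu intrinsics request the tail-undisturbed policy, so elements
+   past VL keep the value of the merge operand.  These loads are unmasked,
+   hence the "tu" policy and the absence of a "v0.t" operand in the patterns
+   below.  */
+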
+vint8mf8_t
+test___riscv_vle8_v_i8mf8_tu(vint8mf8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf8_tu(merge,base,vl);
+}
+
+vint8mf4_t
+test___riscv_vle8_v_i8mf4_tu(vint8mf4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf4_tu(merge,base,vl);
+}
+
+vint8mf2_t
+test___riscv_vle8_v_i8mf2_tu(vint8mf2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf2_tu(merge,base,vl);
+}
+
+vint8m1_t
+test___riscv_vle8_v_i8m1_tu(vint8m1_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m1_tu(merge,base,vl);
+}
+
+vint8m2_t
+test___riscv_vle8_v_i8m2_tu(vint8m2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m2_tu(merge,base,vl);
+}
+
+vint8m4_t
+test___riscv_vle8_v_i8m4_tu(vint8m4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m4_tu(merge,base,vl);
+}
+
+vint8m8_t
+test___riscv_vle8_v_i8m8_tu(vint8m8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m8_tu(merge,base,vl);
+}
+
+vuint8mf8_t
+test___riscv_vle8_v_u8mf8_tu(vuint8mf8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf8_tu(merge,base,vl);
+}
+
+vuint8mf4_t
+test___riscv_vle8_v_u8mf4_tu(vuint8mf4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf4_tu(merge,base,vl);
+}
+
+vuint8mf2_t
+test___riscv_vle8_v_u8mf2_tu(vuint8mf2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf2_tu(merge,base,vl);
+}
+
+vuint8m1_t
+test___riscv_vle8_v_u8m1_tu(vuint8m1_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m1_tu(merge,base,vl);
+}
+
+vuint8m2_t
+test___riscv_vle8_v_u8m2_tu(vuint8m2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m2_tu(merge,base,vl);
+}
+
+vuint8m4_t
+test___riscv_vle8_v_u8m4_tu(vuint8m4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m4_tu(merge,base,vl);
+}
+
+vuint8m8_t
+test___riscv_vle8_v_u8m8_tu(vuint8m8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m8_tu(merge,base,vl);
+}
+
+vint16mf4_t
+test___riscv_vle16_v_i16mf4_tu(vint16mf4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf4_tu(merge,base,vl);
+}
+
+vint16mf2_t
+test___riscv_vle16_v_i16mf2_tu(vint16mf2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf2_tu(merge,base,vl);
+}
+
+vint16m1_t
+test___riscv_vle16_v_i16m1_tu(vint16m1_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m1_tu(merge,base,vl);
+}
+
+vint16m2_t
+test___riscv_vle16_v_i16m2_tu(vint16m2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m2_tu(merge,base,vl);
+}
+
+vint16m4_t
+test___riscv_vle16_v_i16m4_tu(vint16m4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m4_tu(merge,base,vl);
+}
+
+vint16m8_t
+test___riscv_vle16_v_i16m8_tu(vint16m8_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m8_tu(merge,base,vl);
+}
+
+vuint16mf4_t
+test___riscv_vle16_v_u16mf4_tu(vuint16mf4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf4_tu(merge,base,vl);
+}
+
+vuint16mf2_t
+test___riscv_vle16_v_u16mf2_tu(vuint16mf2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf2_tu(merge,base,vl);
+}
+
+vuint16m1_t
+test___riscv_vle16_v_u16m1_tu(vuint16m1_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m1_tu(merge,base,vl);
+}
+
+vuint16m2_t
+test___riscv_vle16_v_u16m2_tu(vuint16m2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m2_tu(merge,base,vl);
+}
+
+vuint16m4_t
+test___riscv_vle16_v_u16m4_tu(vuint16m4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m4_tu(merge,base,vl);
+}
+
+vuint16m8_t
+test___riscv_vle16_v_u16m8_tu(vuint16m8_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m8_tu(merge,base,vl);
+}
+
+vint32mf2_t
+test___riscv_vle32_v_i32mf2_tu(vint32mf2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32mf2_tu(merge,base,vl);
+}
+
+vint32m1_t
+test___riscv_vle32_v_i32m1_tu(vint32m1_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m1_tu(merge,base,vl);
+}
+
+vint32m2_t
+test___riscv_vle32_v_i32m2_tu(vint32m2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m2_tu(merge,base,vl);
+}
+
+vint32m4_t
+test___riscv_vle32_v_i32m4_tu(vint32m4_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m4_tu(merge,base,vl);
+}
+
+vint32m8_t
+test___riscv_vle32_v_i32m8_tu(vint32m8_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m8_tu(merge,base,vl);
+}
+
+vuint32mf2_t
+test___riscv_vle32_v_u32mf2_tu(vuint32mf2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32mf2_tu(merge,base,vl);
+}
+
+vuint32m1_t
+test___riscv_vle32_v_u32m1_tu(vuint32m1_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m1_tu(merge,base,vl);
+}
+
+vuint32m2_t
+test___riscv_vle32_v_u32m2_tu(vuint32m2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m2_tu(merge,base,vl);
+}
+
+vuint32m4_t
+test___riscv_vle32_v_u32m4_tu(vuint32m4_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m4_tu(merge,base,vl);
+}
+
+vuint32m8_t
+test___riscv_vle32_v_u32m8_tu(vuint32m8_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m8_tu(merge,base,vl);
+}
+
+vfloat32mf2_t
+test___riscv_vle32_v_f32mf2_tu(vfloat32mf2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32mf2_tu(merge,base,vl);
+}
+
+vfloat32m1_t
+test___riscv_vle32_v_f32m1_tu(vfloat32m1_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m1_tu(merge,base,vl);
+}
+
+vfloat32m2_t
+test___riscv_vle32_v_f32m2_tu(vfloat32m2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m2_tu(merge,base,vl);
+}
+
+vfloat32m4_t
+test___riscv_vle32_v_f32m4_tu(vfloat32m4_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m4_tu(merge,base,vl);
+}
+
+vfloat32m8_t
+test___riscv_vle32_v_f32m8_tu(vfloat32m8_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m8_tu(merge,base,vl);
+}
+
+vint64m1_t
+test___riscv_vle64_v_i64m1_tu(vint64m1_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m1_tu(merge,base,vl);
+}
+
+vint64m2_t
+test___riscv_vle64_v_i64m2_tu(vint64m2_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m2_tu(merge,base,vl);
+}
+
+vint64m4_t
+test___riscv_vle64_v_i64m4_tu(vint64m4_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m4_tu(merge,base,vl);
+}
+
+vint64m8_t
+test___riscv_vle64_v_i64m8_tu(vint64m8_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m8_tu(merge,base,vl);
+}
+
+vuint64m1_t
+test___riscv_vle64_v_u64m1_tu(vuint64m1_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m1_tu(merge,base,vl);
+}
+
+vuint64m2_t
+test___riscv_vle64_v_u64m2_tu(vuint64m2_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m2_tu(merge,base,vl);
+}
+
+vuint64m4_t
+test___riscv_vle64_v_u64m4_tu(vuint64m4_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m4_tu(merge,base,vl);
+}
+
+vuint64m8_t
+test___riscv_vle64_v_u64m8_tu(vuint64m8_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m8_tu(merge,base,vl);
+}
+
+vfloat64m1_t
+test___riscv_vle64_v_f64m1_tu(vfloat64m1_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m1_tu(merge,base,vl);
+}
+
+vfloat64m2_t
+test___riscv_vle64_v_f64m2_tu(vfloat64m2_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m2_tu(merge,base,vl);
+}
+
+vfloat64m4_t
+test___riscv_vle64_v_f64m4_tu(vfloat64m4_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m4_tu(merge,base,vl);
+}
+
+vfloat64m8_t
+test___riscv_vle64_v_f64m8_tu(vfloat64m8_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m8_tu(merge,base,vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tu-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tu-2.c
new file mode 100644
index 00000000000..67173563baa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tu-2.c
@@ -0,0 +1,345 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
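+/* Same TU tests as vle_tu-1.c, but with the constant AVL 31, which fits
+   the 5-bit immediate of vsetivli; the unused VL parameter only keeps the
+   signatures uniform.  */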
+vint8mf8_t
+test___riscv_vle8_v_i8mf8_tu(vint8mf8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf8_tu(merge,base,31);
+}
+
+vint8mf4_t
+test___riscv_vle8_v_i8mf4_tu(vint8mf4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf4_tu(merge,base,31);
+}
+
+vint8mf2_t
+test___riscv_vle8_v_i8mf2_tu(vint8mf2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf2_tu(merge,base,31);
+}
+
+vint8m1_t
+test___riscv_vle8_v_i8m1_tu(vint8m1_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m1_tu(merge,base,31);
+}
+
+vint8m2_t
+test___riscv_vle8_v_i8m2_tu(vint8m2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m2_tu(merge,base,31);
+}
+
+vint8m4_t
+test___riscv_vle8_v_i8m4_tu(vint8m4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m4_tu(merge,base,31);
+}
+
+vint8m8_t
+test___riscv_vle8_v_i8m8_tu(vint8m8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m8_tu(merge,base,31);
+}
+
+vuint8mf8_t
+test___riscv_vle8_v_u8mf8_tu(vuint8mf8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf8_tu(merge,base,31);
+}
+
+vuint8mf4_t
+test___riscv_vle8_v_u8mf4_tu(vuint8mf4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf4_tu(merge,base,31);
+}
+
+vuint8mf2_t
+test___riscv_vle8_v_u8mf2_tu(vuint8mf2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf2_tu(merge,base,31);
+}
+
+vuint8m1_t
+test___riscv_vle8_v_u8m1_tu(vuint8m1_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m1_tu(merge,base,31);
+}
+
+vuint8m2_t
+test___riscv_vle8_v_u8m2_tu(vuint8m2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m2_tu(merge,base,31);
+}
+
+vuint8m4_t
+test___riscv_vle8_v_u8m4_tu(vuint8m4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m4_tu(merge,base,31);
+}
+
+vuint8m8_t
+test___riscv_vle8_v_u8m8_tu(vuint8m8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m8_tu(merge,base,31);
+}
+
+vint16mf4_t
+test___riscv_vle16_v_i16mf4_tu(vint16mf4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf4_tu(merge,base,31);
+}
+
+vint16mf2_t
+test___riscv_vle16_v_i16mf2_tu(vint16mf2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf2_tu(merge,base,31);
+}
+
+vint16m1_t
+test___riscv_vle16_v_i16m1_tu(vint16m1_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m1_tu(merge,base,31);
+}
+
+vint16m2_t
+test___riscv_vle16_v_i16m2_tu(vint16m2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m2_tu(merge,base,31);
+}
+
+vint16m4_t
+test___riscv_vle16_v_i16m4_tu(vint16m4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m4_tu(merge,base,31);
+}
+
+vint16m8_t
+test___riscv_vle16_v_i16m8_tu(vint16m8_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m8_tu(merge,base,31);
+}
+
+vuint16mf4_t
+test___riscv_vle16_v_u16mf4_tu(vuint16mf4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf4_tu(merge,base,31);
+}
+
+vuint16mf2_t
+test___riscv_vle16_v_u16mf2_tu(vuint16mf2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf2_tu(merge,base,31);
+}
+
+vuint16m1_t
+test___riscv_vle16_v_u16m1_tu(vuint16m1_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m1_tu(merge,base,31);
+}
+
+vuint16m2_t
+test___riscv_vle16_v_u16m2_tu(vuint16m2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m2_tu(merge,base,31);
+}
+
+vuint16m4_t
+test___riscv_vle16_v_u16m4_tu(vuint16m4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m4_tu(merge,base,31);
+}
+
+vuint16m8_t
+test___riscv_vle16_v_u16m8_tu(vuint16m8_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m8_tu(merge,base,31);
+}
+
+vint32mf2_t
+test___riscv_vle32_v_i32mf2_tu(vint32mf2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32mf2_tu(merge,base,31);
+}
+
+vint32m1_t
+test___riscv_vle32_v_i32m1_tu(vint32m1_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m1_tu(merge,base,31);
+}
+
+vint32m2_t
+test___riscv_vle32_v_i32m2_tu(vint32m2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m2_tu(merge,base,31);
+}
+
+vint32m4_t
+test___riscv_vle32_v_i32m4_tu(vint32m4_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m4_tu(merge,base,31);
+}
+
+vint32m8_t
+test___riscv_vle32_v_i32m8_tu(vint32m8_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m8_tu(merge,base,31);
+}
+
+vuint32mf2_t
+test___riscv_vle32_v_u32mf2_tu(vuint32mf2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32mf2_tu(merge,base,31);
+}
+
+vuint32m1_t
+test___riscv_vle32_v_u32m1_tu(vuint32m1_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m1_tu(merge,base,31);
+}
+
+vuint32m2_t
+test___riscv_vle32_v_u32m2_tu(vuint32m2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m2_tu(merge,base,31);
+}
+
+vuint32m4_t
+test___riscv_vle32_v_u32m4_tu(vuint32m4_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m4_tu(merge,base,31);
+}
+
+vuint32m8_t
+test___riscv_vle32_v_u32m8_tu(vuint32m8_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m8_tu(merge,base,31);
+}
+
+vfloat32mf2_t
+test___riscv_vle32_v_f32mf2_tu(vfloat32mf2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32mf2_tu(merge,base,31);
+}
+
+vfloat32m1_t
+test___riscv_vle32_v_f32m1_tu(vfloat32m1_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m1_tu(merge,base,31);
+}
+
+vfloat32m2_t
+test___riscv_vle32_v_f32m2_tu(vfloat32m2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m2_tu(merge,base,31);
+}
+
+vfloat32m4_t
+test___riscv_vle32_v_f32m4_tu(vfloat32m4_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m4_tu(merge,base,31);
+}
+
+vfloat32m8_t
+test___riscv_vle32_v_f32m8_tu(vfloat32m8_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m8_tu(merge,base,31);
+}
+
+vint64m1_t
+test___riscv_vle64_v_i64m1_tu(vint64m1_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m1_tu(merge,base,31);
+}
+
+vint64m2_t
+test___riscv_vle64_v_i64m2_tu(vint64m2_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m2_tu(merge,base,31);
+}
+
+vint64m4_t
+test___riscv_vle64_v_i64m4_tu(vint64m4_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m4_tu(merge,base,31);
+}
+
+vint64m8_t
+test___riscv_vle64_v_i64m8_tu(vint64m8_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m8_tu(merge,base,31);
+}
+
+vuint64m1_t
+test___riscv_vle64_v_u64m1_tu(vuint64m1_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m1_tu(merge,base,31);
+}
+
+vuint64m2_t
+test___riscv_vle64_v_u64m2_tu(vuint64m2_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m2_tu(merge,base,31);
+}
+
+vuint64m4_t
+test___riscv_vle64_v_u64m4_tu(vuint64m4_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m4_tu(merge,base,31);
+}
+
+vuint64m8_t
+test___riscv_vle64_v_u64m8_tu(vuint64m8_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m8_tu(merge,base,31);
+}
+
+vfloat64m1_t
+test___riscv_vle64_v_f64m1_tu(vfloat64m1_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m1_tu(merge,base,31);
+}
+
+vfloat64m2_t
+test___riscv_vle64_v_f64m2_tu(vfloat64m2_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m2_tu(merge,base,31);
+}
+
+vfloat64m4_t
+test___riscv_vle64_v_f64m4_tu(vfloat64m4_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m4_tu(merge,base,31);
+}
+
+vfloat64m8_t
+test___riscv_vle64_v_f64m8_tu(vfloat64m8_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m8_tu(merge,base,31);
+}
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tu-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tu-3.c
new file mode 100644
index 00000000000..2becefecf2e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tu-3.c
@@ -0,0 +1,345 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
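+/* The constant AVL 32 exceeds the 5-bit vsetivli immediate (maximum 31),
+   so the compiler must fall back to a register AVL via vsetvli, as the
+   scans at the end of the file check.  */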
+vint8mf8_t
+test___riscv_vle8_v_i8mf8_tu(vint8mf8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf8_tu(merge,base,32);
+}
+
+vint8mf4_t
+test___riscv_vle8_v_i8mf4_tu(vint8mf4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf4_tu(merge,base,32);
+}
+
+vint8mf2_t
+test___riscv_vle8_v_i8mf2_tu(vint8mf2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf2_tu(merge,base,32);
+}
+
+vint8m1_t
+test___riscv_vle8_v_i8m1_tu(vint8m1_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m1_tu(merge,base,32);
+}
+
+vint8m2_t
+test___riscv_vle8_v_i8m2_tu(vint8m2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m2_tu(merge,base,32);
+}
+
+vint8m4_t
+test___riscv_vle8_v_i8m4_tu(vint8m4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m4_tu(merge,base,32);
+}
+
+vint8m8_t
+test___riscv_vle8_v_i8m8_tu(vint8m8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m8_tu(merge,base,32);
+}
+
+vuint8mf8_t
+test___riscv_vle8_v_u8mf8_tu(vuint8mf8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf8_tu(merge,base,32);
+}
+
+vuint8mf4_t
+test___riscv_vle8_v_u8mf4_tu(vuint8mf4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf4_tu(merge,base,32);
+}
+
+vuint8mf2_t
+test___riscv_vle8_v_u8mf2_tu(vuint8mf2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf2_tu(merge,base,32);
+}
+
+vuint8m1_t
+test___riscv_vle8_v_u8m1_tu(vuint8m1_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m1_tu(merge,base,32);
+}
+
+vuint8m2_t
+test___riscv_vle8_v_u8m2_tu(vuint8m2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m2_tu(merge,base,32);
+}
+
+vuint8m4_t
+test___riscv_vle8_v_u8m4_tu(vuint8m4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m4_tu(merge,base,32);
+}
+
+vuint8m8_t
+test___riscv_vle8_v_u8m8_tu(vuint8m8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m8_tu(merge,base,32);
+}
+
+vint16mf4_t
+test___riscv_vle16_v_i16mf4_tu(vint16mf4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf4_tu(merge,base,32);
+}
+
+vint16mf2_t
+test___riscv_vle16_v_i16mf2_tu(vint16mf2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf2_tu(merge,base,32);
+}
+
+vint16m1_t
+test___riscv_vle16_v_i16m1_tu(vint16m1_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m1_tu(merge,base,32);
+}
+
+vint16m2_t
+test___riscv_vle16_v_i16m2_tu(vint16m2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m2_tu(merge,base,32);
+}
+
+vint16m4_t
+test___riscv_vle16_v_i16m4_tu(vint16m4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m4_tu(merge,base,32);
+}
+
+vint16m8_t
+test___riscv_vle16_v_i16m8_tu(vint16m8_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m8_tu(merge,base,32);
+}
+
+vuint16mf4_t
+test___riscv_vle16_v_u16mf4_tu(vuint16mf4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf4_tu(merge,base,32);
+}
+
+vuint16mf2_t
+test___riscv_vle16_v_u16mf2_tu(vuint16mf2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf2_tu(merge,base,32);
+}
+
+vuint16m1_t
+test___riscv_vle16_v_u16m1_tu(vuint16m1_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m1_tu(merge,base,32);
+}
+
+vuint16m2_t
+test___riscv_vle16_v_u16m2_tu(vuint16m2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m2_tu(merge,base,32);
+}
+
+vuint16m4_t
+test___riscv_vle16_v_u16m4_tu(vuint16m4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m4_tu(merge,base,32);
+}
+
+vuint16m8_t
+test___riscv_vle16_v_u16m8_tu(vuint16m8_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m8_tu(merge,base,32);
+}
+
+vint32mf2_t
+test___riscv_vle32_v_i32mf2_tu(vint32mf2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32mf2_tu(merge,base,32);
+}
+
+vint32m1_t
+test___riscv_vle32_v_i32m1_tu(vint32m1_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m1_tu(merge,base,32);
+}
+
+vint32m2_t
+test___riscv_vle32_v_i32m2_tu(vint32m2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m2_tu(merge,base,32);
+}
+
+vint32m4_t
+test___riscv_vle32_v_i32m4_tu(vint32m4_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m4_tu(merge,base,32);
+}
+
+vint32m8_t
+test___riscv_vle32_v_i32m8_tu(vint32m8_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m8_tu(merge,base,32);
+}
+
+vuint32mf2_t
+test___riscv_vle32_v_u32mf2_tu(vuint32mf2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32mf2_tu(merge,base,32);
+}
+
+vuint32m1_t
+test___riscv_vle32_v_u32m1_tu(vuint32m1_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m1_tu(merge,base,32);
+}
+
+vuint32m2_t
+test___riscv_vle32_v_u32m2_tu(vuint32m2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m2_tu(merge,base,32);
+}
+
+vuint32m4_t
+test___riscv_vle32_v_u32m4_tu(vuint32m4_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m4_tu(merge,base,32);
+}
+
+vuint32m8_t
+test___riscv_vle32_v_u32m8_tu(vuint32m8_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m8_tu(merge,base,32);
+}
+
+vfloat32mf2_t
+test___riscv_vle32_v_f32mf2_tu(vfloat32mf2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32mf2_tu(merge,base,32);
+}
+
+vfloat32m1_t
+test___riscv_vle32_v_f32m1_tu(vfloat32m1_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m1_tu(merge,base,32);
+}
+
+vfloat32m2_t
+test___riscv_vle32_v_f32m2_tu(vfloat32m2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m2_tu(merge,base,32);
+}
+
+vfloat32m4_t
+test___riscv_vle32_v_f32m4_tu(vfloat32m4_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m4_tu(merge,base,32);
+}
+
+vfloat32m8_t
+test___riscv_vle32_v_f32m8_tu(vfloat32m8_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m8_tu(merge,base,32);
+}
+
+vint64m1_t
+test___riscv_vle64_v_i64m1_tu(vint64m1_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m1_tu(merge,base,32);
+}
+
+vint64m2_t
+test___riscv_vle64_v_i64m2_tu(vint64m2_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m2_tu(merge,base,32);
+}
+
+vint64m4_t
+test___riscv_vle64_v_i64m4_tu(vint64m4_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m4_tu(merge,base,32);
+}
+
+vint64m8_t
+test___riscv_vle64_v_i64m8_tu(vint64m8_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m8_tu(merge,base,32);
+}
+
+vuint64m1_t
+test___riscv_vle64_v_u64m1_tu(vuint64m1_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m1_tu(merge,base,32);
+}
+
+vuint64m2_t
+test___riscv_vle64_v_u64m2_tu(vuint64m2_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m2_tu(merge,base,32);
+}
+
+vuint64m4_t
+test___riscv_vle64_v_u64m4_tu(vuint64m4_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m4_tu(merge,base,32);
+}
+
+vuint64m8_t
+test___riscv_vle64_v_u64m8_tu(vuint64m8_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m8_tu(merge,base,32);
+}
+
+vfloat64m1_t
+test___riscv_vle64_v_f64m1_tu(vfloat64m1_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m1_tu(merge,base,32);
+}
+
+vfloat64m2_t
+test___riscv_vle64_v_f64m2_tu(vfloat64m2_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m2_tu(merge,base,32);
+}
+
+vfloat64m4_t
+test___riscv_vle64_v_f64m4_tu(vfloat64m4_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m4_tu(merge,base,32);
+}
+
+vfloat64m8_t
+test___riscv_vle64_v_f64m8_tu(vfloat64m8_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m8_tu(merge,base,32);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\)} 3 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tum-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tum-1.c
new file mode 100644
index 00000000000..ae2606d3b69
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tum-1.c
@@ -0,0 +1,345 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
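+/* Masked tail-undisturbed (TUM) loads with a run-time AVL; the scans
+   additionally require the v0.t mask operand on each vle.  */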
+vint8mf8_t
+test___riscv_vle8_v_i8mf8_tum(vbool64_t mask,vint8mf8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf8_tum(mask,merge,base,vl);
+}
+
+vint8mf4_t
+test___riscv_vle8_v_i8mf4_tum(vbool32_t mask,vint8mf4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf4_tum(mask,merge,base,vl);
+}
+
+vint8mf2_t
+test___riscv_vle8_v_i8mf2_tum(vbool16_t mask,vint8mf2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf2_tum(mask,merge,base,vl);
+}
+
+vint8m1_t
+test___riscv_vle8_v_i8m1_tum(vbool8_t mask,vint8m1_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m1_tum(mask,merge,base,vl);
+}
+
+vint8m2_t
+test___riscv_vle8_v_i8m2_tum(vbool4_t mask,vint8m2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m2_tum(mask,merge,base,vl);
+}
+
+vint8m4_t
+test___riscv_vle8_v_i8m4_tum(vbool2_t mask,vint8m4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m4_tum(mask,merge,base,vl);
+}
+
+vint8m8_t
+test___riscv_vle8_v_i8m8_tum(vbool1_t mask,vint8m8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m8_tum(mask,merge,base,vl);
+}
+
+vuint8mf8_t
+test___riscv_vle8_v_u8mf8_tum(vbool64_t mask,vuint8mf8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf8_tum(mask,merge,base,vl);
+}
+
+vuint8mf4_t
+test___riscv_vle8_v_u8mf4_tum(vbool32_t mask,vuint8mf4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf4_tum(mask,merge,base,vl);
+}
+
+vuint8mf2_t
+test___riscv_vle8_v_u8mf2_tum(vbool16_t mask,vuint8mf2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf2_tum(mask,merge,base,vl);
+}
+
+vuint8m1_t
+test___riscv_vle8_v_u8m1_tum(vbool8_t mask,vuint8m1_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m1_tum(mask,merge,base,vl);
+}
+
+vuint8m2_t
+test___riscv_vle8_v_u8m2_tum(vbool4_t mask,vuint8m2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m2_tum(mask,merge,base,vl);
+}
+
+vuint8m4_t
+test___riscv_vle8_v_u8m4_tum(vbool2_t mask,vuint8m4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m4_tum(mask,merge,base,vl);
+}
+
+vuint8m8_t
+test___riscv_vle8_v_u8m8_tum(vbool1_t mask,vuint8m8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m8_tum(mask,merge,base,vl);
+}
+
+vint16mf4_t
+test___riscv_vle16_v_i16mf4_tum(vbool64_t mask,vint16mf4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf4_tum(mask,merge,base,vl);
+}
+
+vint16mf2_t
+test___riscv_vle16_v_i16mf2_tum(vbool32_t mask,vint16mf2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf2_tum(mask,merge,base,vl);
+}
+
+vint16m1_t
+test___riscv_vle16_v_i16m1_tum(vbool16_t mask,vint16m1_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m1_tum(mask,merge,base,vl);
+}
+
+vint16m2_t
+test___riscv_vle16_v_i16m2_tum(vbool8_t mask,vint16m2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m2_tum(mask,merge,base,vl);
+}
+
+vint16m4_t
+test___riscv_vle16_v_i16m4_tum(vbool4_t mask,vint16m4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m4_tum(mask,merge,base,vl);
+}
+
+vint16m8_t
+test___riscv_vle16_v_i16m8_tum(vbool2_t mask,vint16m8_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m8_tum(mask,merge,base,vl);
+}
+
+vuint16mf4_t
+test___riscv_vle16_v_u16mf4_tum(vbool64_t mask,vuint16mf4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf4_tum(mask,merge,base,vl);
+}
+
+vuint16mf2_t
+test___riscv_vle16_v_u16mf2_tum(vbool32_t mask,vuint16mf2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf2_tum(mask,merge,base,vl);
+}
+
+vuint16m1_t
+test___riscv_vle16_v_u16m1_tum(vbool16_t mask,vuint16m1_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m1_tum(mask,merge,base,vl);
+}
+
+vuint16m2_t
+test___riscv_vle16_v_u16m2_tum(vbool8_t mask,vuint16m2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m2_tum(mask,merge,base,vl);
+}
+
+vuint16m4_t
+test___riscv_vle16_v_u16m4_tum(vbool4_t mask,vuint16m4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m4_tum(mask,merge,base,vl);
+}
+
+vuint16m8_t
+test___riscv_vle16_v_u16m8_tum(vbool2_t mask,vuint16m8_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m8_tum(mask,merge,base,vl);
+}
+
+vint32mf2_t
+test___riscv_vle32_v_i32mf2_tum(vbool64_t mask,vint32mf2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32mf2_tum(mask,merge,base,vl);
+}
+
+vint32m1_t
+test___riscv_vle32_v_i32m1_tum(vbool32_t mask,vint32m1_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m1_tum(mask,merge,base,vl);
+}
+
+vint32m2_t
+test___riscv_vle32_v_i32m2_tum(vbool16_t mask,vint32m2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m2_tum(mask,merge,base,vl);
+}
+
+vint32m4_t
+test___riscv_vle32_v_i32m4_tum(vbool8_t mask,vint32m4_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m4_tum(mask,merge,base,vl);
+}
+
+vint32m8_t
+test___riscv_vle32_v_i32m8_tum(vbool4_t mask,vint32m8_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m8_tum(mask,merge,base,vl);
+}
+
+vuint32mf2_t
+test___riscv_vle32_v_u32mf2_tum(vbool64_t mask,vuint32mf2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32mf2_tum(mask,merge,base,vl);
+}
+
+vuint32m1_t
+test___riscv_vle32_v_u32m1_tum(vbool32_t mask,vuint32m1_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m1_tum(mask,merge,base,vl);
+}
+
+vuint32m2_t
+test___riscv_vle32_v_u32m2_tum(vbool16_t mask,vuint32m2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m2_tum(mask,merge,base,vl);
+}
+
+vuint32m4_t
+test___riscv_vle32_v_u32m4_tum(vbool8_t mask,vuint32m4_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m4_tum(mask,merge,base,vl);
+}
+
+vuint32m8_t
+test___riscv_vle32_v_u32m8_tum(vbool4_t mask,vuint32m8_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m8_tum(mask,merge,base,vl);
+}
+
+vfloat32mf2_t
+test___riscv_vle32_v_f32mf2_tum(vbool64_t mask,vfloat32mf2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32mf2_tum(mask,merge,base,vl);
+}
+
+vfloat32m1_t
+test___riscv_vle32_v_f32m1_tum(vbool32_t mask,vfloat32m1_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m1_tum(mask,merge,base,vl);
+}
+
+vfloat32m2_t
+test___riscv_vle32_v_f32m2_tum(vbool16_t mask,vfloat32m2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m2_tum(mask,merge,base,vl);
+}
+
+vfloat32m4_t
+test___riscv_vle32_v_f32m4_tum(vbool8_t mask,vfloat32m4_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m4_tum(mask,merge,base,vl);
+}
+
+vfloat32m8_t
+test___riscv_vle32_v_f32m8_tum(vbool4_t mask,vfloat32m8_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m8_tum(mask,merge,base,vl);
+}
+
+vint64m1_t
+test___riscv_vle64_v_i64m1_tum(vbool64_t mask,vint64m1_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m1_tum(mask,merge,base,vl);
+}
+
+vint64m2_t
+test___riscv_vle64_v_i64m2_tum(vbool32_t mask,vint64m2_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m2_tum(mask,merge,base,vl);
+}
+
+vint64m4_t
+test___riscv_vle64_v_i64m4_tum(vbool16_t mask,vint64m4_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m4_tum(mask,merge,base,vl);
+}
+
+vint64m8_t
+test___riscv_vle64_v_i64m8_tum(vbool8_t mask,vint64m8_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m8_tum(mask,merge,base,vl);
+}
+
+vuint64m1_t
+test___riscv_vle64_v_u64m1_tum(vbool64_t mask,vuint64m1_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m1_tum(mask,merge,base,vl);
+}
+
+vuint64m2_t
+test___riscv_vle64_v_u64m2_tum(vbool32_t mask,vuint64m2_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m2_tum(mask,merge,base,vl);
+}
+
+vuint64m4_t
+test___riscv_vle64_v_u64m4_tum(vbool16_t mask,vuint64m4_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m4_tum(mask,merge,base,vl);
+}
+
+vuint64m8_t
+test___riscv_vle64_v_u64m8_tum(vbool8_t mask,vuint64m8_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m8_tum(mask,merge,base,vl);
+}
+
+vfloat64m1_t
+test___riscv_vle64_v_f64m1_tum(vbool64_t mask,vfloat64m1_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m1_tum(mask,merge,base,vl);
+}
+
+vfloat64m2_t
+test___riscv_vle64_v_f64m2_tum(vbool32_t mask,vfloat64m2_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m2_tum(mask,merge,base,vl);
+}
+
+vfloat64m4_t
+test___riscv_vle64_v_f64m4_tum(vbool16_t mask,vfloat64m4_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m4_tum(mask,merge,base,vl);
+}
+
+vfloat64m8_t
+test___riscv_vle64_v_f64m8_tum(vbool8_t mask,vfloat64m8_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m8_tum(mask,merge,base,vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tum-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tum-2.c
new file mode 100644
index 00000000000..ec697bef5cb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tum-2.c
@@ -0,0 +1,345 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
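+/* TUM variants with the constant AVL 31, which fits the vsetivli
+   immediate; expect vsetivli zero,31 plus the v0.t mask operand.  */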
+vint8mf8_t
+test___riscv_vle8_v_i8mf8_tum(vbool64_t mask,vint8mf8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf8_tum(mask,merge,base,31);
+}
+
+vint8mf4_t
+test___riscv_vle8_v_i8mf4_tum(vbool32_t mask,vint8mf4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf4_tum(mask,merge,base,31);
+}
+
+vint8mf2_t
+test___riscv_vle8_v_i8mf2_tum(vbool16_t mask,vint8mf2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf2_tum(mask,merge,base,31);
+}
+
+vint8m1_t
+test___riscv_vle8_v_i8m1_tum(vbool8_t mask,vint8m1_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m1_tum(mask,merge,base,31);
+}
+
+vint8m2_t
+test___riscv_vle8_v_i8m2_tum(vbool4_t mask,vint8m2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m2_tum(mask,merge,base,31);
+}
+
+vint8m4_t
+test___riscv_vle8_v_i8m4_tum(vbool2_t mask,vint8m4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m4_tum(mask,merge,base,31);
+}
+
+vint8m8_t
+test___riscv_vle8_v_i8m8_tum(vbool1_t mask,vint8m8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m8_tum(mask,merge,base,31);
+}
+
+vuint8mf8_t
+test___riscv_vle8_v_u8mf8_tum(vbool64_t mask,vuint8mf8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf8_tum(mask,merge,base,31);
+}
+
+vuint8mf4_t
+test___riscv_vle8_v_u8mf4_tum(vbool32_t mask,vuint8mf4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf4_tum(mask,merge,base,31);
+}
+
+vuint8mf2_t
+test___riscv_vle8_v_u8mf2_tum(vbool16_t mask,vuint8mf2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf2_tum(mask,merge,base,31);
+}
+
+vuint8m1_t
+test___riscv_vle8_v_u8m1_tum(vbool8_t mask,vuint8m1_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m1_tum(mask,merge,base,31);
+}
+
+vuint8m2_t
+test___riscv_vle8_v_u8m2_tum(vbool4_t mask,vuint8m2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m2_tum(mask,merge,base,31);
+}
+
+vuint8m4_t
+test___riscv_vle8_v_u8m4_tum(vbool2_t mask,vuint8m4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m4_tum(mask,merge,base,31);
+}
+
+vuint8m8_t
+test___riscv_vle8_v_u8m8_tum(vbool1_t mask,vuint8m8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m8_tum(mask,merge,base,31);
+}
+
+vint16mf4_t
+test___riscv_vle16_v_i16mf4_tum(vbool64_t mask,vint16mf4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf4_tum(mask,merge,base,31);
+}
+
+vint16mf2_t
+test___riscv_vle16_v_i16mf2_tum(vbool32_t mask,vint16mf2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf2_tum(mask,merge,base,31);
+}
+
+vint16m1_t
+test___riscv_vle16_v_i16m1_tum(vbool16_t mask,vint16m1_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m1_tum(mask,merge,base,31);
+}
+
+vint16m2_t
+test___riscv_vle16_v_i16m2_tum(vbool8_t mask,vint16m2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m2_tum(mask,merge,base,31);
+}
+
+vint16m4_t
+test___riscv_vle16_v_i16m4_tum(vbool4_t mask,vint16m4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m4_tum(mask,merge,base,31);
+}
+
+vint16m8_t
+test___riscv_vle16_v_i16m8_tum(vbool2_t mask,vint16m8_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m8_tum(mask,merge,base,31);
+}
+
+vuint16mf4_t
+test___riscv_vle16_v_u16mf4_tum(vbool64_t mask,vuint16mf4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf4_tum(mask,merge,base,31);
+}
+
+vuint16mf2_t
+test___riscv_vle16_v_u16mf2_tum(vbool32_t mask,vuint16mf2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf2_tum(mask,merge,base,31);
+}
+
+vuint16m1_t
+test___riscv_vle16_v_u16m1_tum(vbool16_t mask,vuint16m1_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m1_tum(mask,merge,base,31);
+}
+
+vuint16m2_t
+test___riscv_vle16_v_u16m2_tum(vbool8_t mask,vuint16m2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m2_tum(mask,merge,base,31);
+}
+
+vuint16m4_t
+test___riscv_vle16_v_u16m4_tum(vbool4_t mask,vuint16m4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m4_tum(mask,merge,base,31);
+}
+
+vuint16m8_t
+test___riscv_vle16_v_u16m8_tum(vbool2_t mask,vuint16m8_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m8_tum(mask,merge,base,31);
+}
+
+vint32mf2_t
+test___riscv_vle32_v_i32mf2_tum(vbool64_t mask,vint32mf2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32mf2_tum(mask,merge,base,31);
+}
+
+vint32m1_t
+test___riscv_vle32_v_i32m1_tum(vbool32_t mask,vint32m1_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m1_tum(mask,merge,base,31);
+}
+
+vint32m2_t
+test___riscv_vle32_v_i32m2_tum(vbool16_t mask,vint32m2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m2_tum(mask,merge,base,31);
+}
+
+vint32m4_t
+test___riscv_vle32_v_i32m4_tum(vbool8_t mask,vint32m4_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m4_tum(mask,merge,base,31);
+}
+
+vint32m8_t
+test___riscv_vle32_v_i32m8_tum(vbool4_t mask,vint32m8_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m8_tum(mask,merge,base,31);
+}
+
+vuint32mf2_t
+test___riscv_vle32_v_u32mf2_tum(vbool64_t mask,vuint32mf2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32mf2_tum(mask,merge,base,31);
+}
+
+vuint32m1_t
+test___riscv_vle32_v_u32m1_tum(vbool32_t mask,vuint32m1_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m1_tum(mask,merge,base,31);
+}
+
+vuint32m2_t
+test___riscv_vle32_v_u32m2_tum(vbool16_t mask,vuint32m2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m2_tum(mask,merge,base,31);
+}
+
+vuint32m4_t
+test___riscv_vle32_v_u32m4_tum(vbool8_t mask,vuint32m4_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m4_tum(mask,merge,base,31);
+}
+
+vuint32m8_t
+test___riscv_vle32_v_u32m8_tum(vbool4_t mask,vuint32m8_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m8_tum(mask,merge,base,31);
+}
+
+vfloat32mf2_t
+test___riscv_vle32_v_f32mf2_tum(vbool64_t mask,vfloat32mf2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32mf2_tum(mask,merge,base,31);
+}
+
+vfloat32m1_t
+test___riscv_vle32_v_f32m1_tum(vbool32_t mask,vfloat32m1_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m1_tum(mask,merge,base,31);
+}
+
+vfloat32m2_t
+test___riscv_vle32_v_f32m2_tum(vbool16_t mask,vfloat32m2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m2_tum(mask,merge,base,31);
+}
+
+vfloat32m4_t
+test___riscv_vle32_v_f32m4_tum(vbool8_t mask,vfloat32m4_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m4_tum(mask,merge,base,31);
+}
+
+vfloat32m8_t
+test___riscv_vle32_v_f32m8_tum(vbool4_t mask,vfloat32m8_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m8_tum(mask,merge,base,31);
+}
+
+vint64m1_t
+test___riscv_vle64_v_i64m1_tum(vbool64_t mask,vint64m1_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m1_tum(mask,merge,base,31);
+}
+
+vint64m2_t
+test___riscv_vle64_v_i64m2_tum(vbool32_t mask,vint64m2_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m2_tum(mask,merge,base,31);
+}
+
+vint64m4_t
+test___riscv_vle64_v_i64m4_tum(vbool16_t mask,vint64m4_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m4_tum(mask,merge,base,31);
+}
+
+vint64m8_t
+test___riscv_vle64_v_i64m8_tum(vbool8_t mask,vint64m8_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m8_tum(mask,merge,base,31);
+}
+
+vuint64m1_t
+test___riscv_vle64_v_u64m1_tum(vbool64_t mask,vuint64m1_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m1_tum(mask,merge,base,31);
+}
+
+vuint64m2_t
+test___riscv_vle64_v_u64m2_tum(vbool32_t mask,vuint64m2_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m2_tum(mask,merge,base,31);
+}
+
+vuint64m4_t
+test___riscv_vle64_v_u64m4_tum(vbool16_t mask,vuint64m4_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m4_tum(mask,merge,base,31);
+}
+
+vuint64m8_t
+test___riscv_vle64_v_u64m8_tum(vbool8_t mask,vuint64m8_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m8_tum(mask,merge,base,31);
+}
+
+vfloat64m1_t
+test___riscv_vle64_v_f64m1_tum(vbool64_t mask,vfloat64m1_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m1_tum(mask,merge,base,31);
+}
+
+vfloat64m2_t
+test___riscv_vle64_v_f64m2_tum(vbool32_t mask,vfloat64m2_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m2_tum(mask,merge,base,31);
+}
+
+vfloat64m4_t
+test___riscv_vle64_v_f64m4_tum(vbool16_t mask,vfloat64m4_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m4_tum(mask,merge,base,31);
+}
+
+vfloat64m8_t
+test___riscv_vle64_v_f64m8_tum(vbool8_t mask,vfloat64m8_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m8_tum(mask,merge,base,31);
+}
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tum-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tum-3.c
new file mode 100644
index 00000000000..0a9fc78a002
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tum-3.c
@@ -0,0 +1,345 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
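+/* TUM variants with the constant AVL 32, which does not fit the 5-bit
+   vsetivli immediate, so a register-based vsetvli is expected.  */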
+vint8mf8_t
+test___riscv_vle8_v_i8mf8_tum(vbool64_t mask,vint8mf8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf8_tum(mask,merge,base,32);
+}
+
+vint8mf4_t
+test___riscv_vle8_v_i8mf4_tum(vbool32_t mask,vint8mf4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf4_tum(mask,merge,base,32);
+}
+
+vint8mf2_t
+test___riscv_vle8_v_i8mf2_tum(vbool16_t mask,vint8mf2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf2_tum(mask,merge,base,32);
+}
+
+vint8m1_t
+test___riscv_vle8_v_i8m1_tum(vbool8_t mask,vint8m1_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m1_tum(mask,merge,base,32);
+}
+
+vint8m2_t
+test___riscv_vle8_v_i8m2_tum(vbool4_t mask,vint8m2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m2_tum(mask,merge,base,32);
+}
+
+vint8m4_t
+test___riscv_vle8_v_i8m4_tum(vbool2_t mask,vint8m4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m4_tum(mask,merge,base,32);
+}
+
+vint8m8_t
+test___riscv_vle8_v_i8m8_tum(vbool1_t mask,vint8m8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m8_tum(mask,merge,base,32);
+}
+
+vuint8mf8_t
+test___riscv_vle8_v_u8mf8_tum(vbool64_t mask,vuint8mf8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf8_tum(mask,merge,base,32);
+}
+
+vuint8mf4_t
+test___riscv_vle8_v_u8mf4_tum(vbool32_t mask,vuint8mf4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf4_tum(mask,merge,base,32);
+}
+
+vuint8mf2_t
+test___riscv_vle8_v_u8mf2_tum(vbool16_t mask,vuint8mf2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf2_tum(mask,merge,base,32);
+}
+
+vuint8m1_t
+test___riscv_vle8_v_u8m1_tum(vbool8_t mask,vuint8m1_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m1_tum(mask,merge,base,32);
+}
+
+vuint8m2_t
+test___riscv_vle8_v_u8m2_tum(vbool4_t mask,vuint8m2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m2_tum(mask,merge,base,32);
+}
+
+vuint8m4_t
+test___riscv_vle8_v_u8m4_tum(vbool2_t mask,vuint8m4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m4_tum(mask,merge,base,32);
+}
+
+vuint8m8_t
+test___riscv_vle8_v_u8m8_tum(vbool1_t mask,vuint8m8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m8_tum(mask,merge,base,32);
+}
+
+vint16mf4_t
+test___riscv_vle16_v_i16mf4_tum(vbool64_t mask,vint16mf4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf4_tum(mask,merge,base,32);
+}
+
+vint16mf2_t
+test___riscv_vle16_v_i16mf2_tum(vbool32_t mask,vint16mf2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf2_tum(mask,merge,base,32);
+}
+
+vint16m1_t
+test___riscv_vle16_v_i16m1_tum(vbool16_t mask,vint16m1_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m1_tum(mask,merge,base,32);
+}
+
+vint16m2_t
+test___riscv_vle16_v_i16m2_tum(vbool8_t mask,vint16m2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m2_tum(mask,merge,base,32);
+}
+
+vint16m4_t
+test___riscv_vle16_v_i16m4_tum(vbool4_t mask,vint16m4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m4_tum(mask,merge,base,32);
+}
+
+vint16m8_t
+test___riscv_vle16_v_i16m8_tum(vbool2_t mask,vint16m8_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m8_tum(mask,merge,base,32);
+}
+
+vuint16mf4_t
+test___riscv_vle16_v_u16mf4_tum(vbool64_t mask,vuint16mf4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf4_tum(mask,merge,base,32);
+}
+
+vuint16mf2_t
+test___riscv_vle16_v_u16mf2_tum(vbool32_t mask,vuint16mf2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf2_tum(mask,merge,base,32);
+}
+
+vuint16m1_t
+test___riscv_vle16_v_u16m1_tum(vbool16_t mask,vuint16m1_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m1_tum(mask,merge,base,32);
+}
+
+vuint16m2_t
+test___riscv_vle16_v_u16m2_tum(vbool8_t mask,vuint16m2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m2_tum(mask,merge,base,32);
+}
+
+vuint16m4_t
+test___riscv_vle16_v_u16m4_tum(vbool4_t mask,vuint16m4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m4_tum(mask,merge,base,32);
+}
+
+vuint16m8_t
+test___riscv_vle16_v_u16m8_tum(vbool2_t mask,vuint16m8_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m8_tum(mask,merge,base,32);
+}
+
+vint32mf2_t
+test___riscv_vle32_v_i32mf2_tum(vbool64_t mask,vint32mf2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32mf2_tum(mask,merge,base,32);
+}
+
+vint32m1_t
+test___riscv_vle32_v_i32m1_tum(vbool32_t mask,vint32m1_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m1_tum(mask,merge,base,32);
+}
+
+vint32m2_t
+test___riscv_vle32_v_i32m2_tum(vbool16_t mask,vint32m2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m2_tum(mask,merge,base,32);
+}
+
+vint32m4_t
+test___riscv_vle32_v_i32m4_tum(vbool8_t mask,vint32m4_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m4_tum(mask,merge,base,32);
+}
+
+vint32m8_t
+test___riscv_vle32_v_i32m8_tum(vbool4_t mask,vint32m8_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m8_tum(mask,merge,base,32);
+}
+
+vuint32mf2_t
+test___riscv_vle32_v_u32mf2_tum(vbool64_t mask,vuint32mf2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32mf2_tum(mask,merge,base,32);
+}
+
+vuint32m1_t
+test___riscv_vle32_v_u32m1_tum(vbool32_t mask,vuint32m1_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m1_tum(mask,merge,base,32);
+}
+
+vuint32m2_t
+test___riscv_vle32_v_u32m2_tum(vbool16_t mask,vuint32m2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m2_tum(mask,merge,base,32);
+}
+
+vuint32m4_t
+test___riscv_vle32_v_u32m4_tum(vbool8_t mask,vuint32m4_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m4_tum(mask,merge,base,32);
+}
+
+vuint32m8_t
+test___riscv_vle32_v_u32m8_tum(vbool4_t mask,vuint32m8_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m8_tum(mask,merge,base,32);
+}
+
+vfloat32mf2_t
+test___riscv_vle32_v_f32mf2_tum(vbool64_t mask,vfloat32mf2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32mf2_tum(mask,merge,base,32);
+}
+
+vfloat32m1_t
+test___riscv_vle32_v_f32m1_tum(vbool32_t mask,vfloat32m1_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m1_tum(mask,merge,base,32);
+}
+
+vfloat32m2_t
+test___riscv_vle32_v_f32m2_tum(vbool16_t mask,vfloat32m2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m2_tum(mask,merge,base,32);
+}
+
+vfloat32m4_t
+test___riscv_vle32_v_f32m4_tum(vbool8_t mask,vfloat32m4_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m4_tum(mask,merge,base,32);
+}
+
+vfloat32m8_t
+test___riscv_vle32_v_f32m8_tum(vbool4_t mask,vfloat32m8_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m8_tum(mask,merge,base,32);
+}
+
+vint64m1_t
+test___riscv_vle64_v_i64m1_tum(vbool64_t mask,vint64m1_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m1_tum(mask,merge,base,32);
+}
+
+vint64m2_t
+test___riscv_vle64_v_i64m2_tum(vbool32_t mask,vint64m2_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m2_tum(mask,merge,base,32);
+}
+
+vint64m4_t
+test___riscv_vle64_v_i64m4_tum(vbool16_t mask,vint64m4_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m4_tum(mask,merge,base,32);
+}
+
+vint64m8_t
+test___riscv_vle64_v_i64m8_tum(vbool8_t mask,vint64m8_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m8_tum(mask,merge,base,32);
+}
+
+vuint64m1_t
+test___riscv_vle64_v_u64m1_tum(vbool64_t mask,vuint64m1_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m1_tum(mask,merge,base,32);
+}
+
+vuint64m2_t
+test___riscv_vle64_v_u64m2_tum(vbool32_t mask,vuint64m2_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m2_tum(mask,merge,base,32);
+}
+
+vuint64m4_t
+test___riscv_vle64_v_u64m4_tum(vbool16_t mask,vuint64m4_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m4_tum(mask,merge,base,32);
+}
+
+vuint64m8_t
+test___riscv_vle64_v_u64m8_tum(vbool8_t mask,vuint64m8_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m8_tum(mask,merge,base,32);
+}
+
+vfloat64m1_t
+test___riscv_vle64_v_f64m1_tum(vbool64_t mask,vfloat64m1_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m1_tum(mask,merge,base,32);
+}
+
+vfloat64m2_t
+test___riscv_vle64_v_f64m2_tum(vbool32_t mask,vfloat64m2_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m2_tum(mask,merge,base,32);
+}
+
+vfloat64m4_t
+test___riscv_vle64_v_f64m4_tum(vbool16_t mask,vfloat64m4_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m4_tum(mask,merge,base,32);
+}
+
+vfloat64m8_t
+test___riscv_vle64_v_f64m8_tum(vbool8_t mask,vfloat64m8_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m8_tum(mask,merge,base,32);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tumu-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tumu-1.c
new file mode 100644
index 00000000000..b21e18feb29
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tumu-1.c
@@ -0,0 +1,345 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t
+test___riscv_vle8_v_i8mf8_tumu(vbool64_t mask,vint8mf8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf8_tumu(mask,merge,base,vl);
+}
+
+vint8mf4_t
+test___riscv_vle8_v_i8mf4_tumu(vbool32_t mask,vint8mf4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf4_tumu(mask,merge,base,vl);
+}
+
+vint8mf2_t
+test___riscv_vle8_v_i8mf2_tumu(vbool16_t mask,vint8mf2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf2_tumu(mask,merge,base,vl);
+}
+
+vint8m1_t
+test___riscv_vle8_v_i8m1_tumu(vbool8_t mask,vint8m1_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m1_tumu(mask,merge,base,vl);
+}
+
+vint8m2_t
+test___riscv_vle8_v_i8m2_tumu(vbool4_t mask,vint8m2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m2_tumu(mask,merge,base,vl);
+}
+
+vint8m4_t
+test___riscv_vle8_v_i8m4_tumu(vbool2_t mask,vint8m4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m4_tumu(mask,merge,base,vl);
+}
+
+vint8m8_t
+test___riscv_vle8_v_i8m8_tumu(vbool1_t mask,vint8m8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m8_tumu(mask,merge,base,vl);
+}
+
+vuint8mf8_t
+test___riscv_vle8_v_u8mf8_tumu(vbool64_t mask,vuint8mf8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf8_tumu(mask,merge,base,vl);
+}
+
+vuint8mf4_t
+test___riscv_vle8_v_u8mf4_tumu(vbool32_t mask,vuint8mf4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf4_tumu(mask,merge,base,vl);
+}
+
+vuint8mf2_t
+test___riscv_vle8_v_u8mf2_tumu(vbool16_t mask,vuint8mf2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf2_tumu(mask,merge,base,vl);
+}
+
+vuint8m1_t
+test___riscv_vle8_v_u8m1_tumu(vbool8_t mask,vuint8m1_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m1_tumu(mask,merge,base,vl);
+}
+
+vuint8m2_t
+test___riscv_vle8_v_u8m2_tumu(vbool4_t mask,vuint8m2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m2_tumu(mask,merge,base,vl);
+}
+
+vuint8m4_t
+test___riscv_vle8_v_u8m4_tumu(vbool2_t mask,vuint8m4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m4_tumu(mask,merge,base,vl);
+}
+
+vuint8m8_t
+test___riscv_vle8_v_u8m8_tumu(vbool1_t mask,vuint8m8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m8_tumu(mask,merge,base,vl);
+}
+
+vint16mf4_t
+test___riscv_vle16_v_i16mf4_tumu(vbool64_t mask,vint16mf4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf4_tumu(mask,merge,base,vl);
+}
+
+vint16mf2_t
+test___riscv_vle16_v_i16mf2_tumu(vbool32_t mask,vint16mf2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf2_tumu(mask,merge,base,vl);
+}
+
+vint16m1_t
+test___riscv_vle16_v_i16m1_tumu(vbool16_t mask,vint16m1_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m1_tumu(mask,merge,base,vl);
+}
+
+vint16m2_t
+test___riscv_vle16_v_i16m2_tumu(vbool8_t mask,vint16m2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m2_tumu(mask,merge,base,vl);
+}
+
+vint16m4_t
+test___riscv_vle16_v_i16m4_tumu(vbool4_t mask,vint16m4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m4_tumu(mask,merge,base,vl);
+}
+
+vint16m8_t
+test___riscv_vle16_v_i16m8_tumu(vbool2_t mask,vint16m8_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m8_tumu(mask,merge,base,vl);
+}
+
+vuint16mf4_t
+test___riscv_vle16_v_u16mf4_tumu(vbool64_t mask,vuint16mf4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf4_tumu(mask,merge,base,vl);
+}
+
+vuint16mf2_t
+test___riscv_vle16_v_u16mf2_tumu(vbool32_t mask,vuint16mf2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf2_tumu(mask,merge,base,vl);
+}
+
+vuint16m1_t
+test___riscv_vle16_v_u16m1_tumu(vbool16_t mask,vuint16m1_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m1_tumu(mask,merge,base,vl);
+}
+
+vuint16m2_t
+test___riscv_vle16_v_u16m2_tumu(vbool8_t mask,vuint16m2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m2_tumu(mask,merge,base,vl);
+}
+
+vuint16m4_t
+test___riscv_vle16_v_u16m4_tumu(vbool4_t mask,vuint16m4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m4_tumu(mask,merge,base,vl);
+}
+
+vuint16m8_t
+test___riscv_vle16_v_u16m8_tumu(vbool2_t mask,vuint16m8_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m8_tumu(mask,merge,base,vl);
+}
+
+vint32mf2_t
+test___riscv_vle32_v_i32mf2_tumu(vbool64_t mask,vint32mf2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32mf2_tumu(mask,merge,base,vl);
+}
+
+vint32m1_t
+test___riscv_vle32_v_i32m1_tumu(vbool32_t mask,vint32m1_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m1_tumu(mask,merge,base,vl);
+}
+
+vint32m2_t
+test___riscv_vle32_v_i32m2_tumu(vbool16_t mask,vint32m2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m2_tumu(mask,merge,base,vl);
+}
+
+vint32m4_t
+test___riscv_vle32_v_i32m4_tumu(vbool8_t mask,vint32m4_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m4_tumu(mask,merge,base,vl);
+}
+
+vint32m8_t
+test___riscv_vle32_v_i32m8_tumu(vbool4_t mask,vint32m8_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m8_tumu(mask,merge,base,vl);
+}
+
+vuint32mf2_t
+test___riscv_vle32_v_u32mf2_tumu(vbool64_t mask,vuint32mf2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32mf2_tumu(mask,merge,base,vl);
+}
+
+vuint32m1_t
+test___riscv_vle32_v_u32m1_tumu(vbool32_t mask,vuint32m1_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m1_tumu(mask,merge,base,vl);
+}
+
+vuint32m2_t
+test___riscv_vle32_v_u32m2_tumu(vbool16_t mask,vuint32m2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m2_tumu(mask,merge,base,vl);
+}
+
+vuint32m4_t
+test___riscv_vle32_v_u32m4_tumu(vbool8_t mask,vuint32m4_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m4_tumu(mask,merge,base,vl);
+}
+
+vuint32m8_t
+test___riscv_vle32_v_u32m8_tumu(vbool4_t mask,vuint32m8_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m8_tumu(mask,merge,base,vl);
+}
+
+vfloat32mf2_t
+test___riscv_vle32_v_f32mf2_tumu(vbool64_t mask,vfloat32mf2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32mf2_tumu(mask,merge,base,vl);
+}
+
+vfloat32m1_t
+test___riscv_vle32_v_f32m1_tumu(vbool32_t mask,vfloat32m1_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m1_tumu(mask,merge,base,vl);
+}
+
+vfloat32m2_t
+test___riscv_vle32_v_f32m2_tumu(vbool16_t mask,vfloat32m2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m2_tumu(mask,merge,base,vl);
+}
+
+vfloat32m4_t
+test___riscv_vle32_v_f32m4_tumu(vbool8_t mask,vfloat32m4_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m4_tumu(mask,merge,base,vl);
+}
+
+vfloat32m8_t
+test___riscv_vle32_v_f32m8_tumu(vbool4_t mask,vfloat32m8_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m8_tumu(mask,merge,base,vl);
+}
+
+vint64m1_t
+test___riscv_vle64_v_i64m1_tumu(vbool64_t mask,vint64m1_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m1_tumu(mask,merge,base,vl);
+}
+
+vint64m2_t
+test___riscv_vle64_v_i64m2_tumu(vbool32_t mask,vint64m2_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m2_tumu(mask,merge,base,vl);
+}
+
+vint64m4_t
+test___riscv_vle64_v_i64m4_tumu(vbool16_t mask,vint64m4_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m4_tumu(mask,merge,base,vl);
+}
+
+vint64m8_t
+test___riscv_vle64_v_i64m8_tumu(vbool8_t mask,vint64m8_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m8_tumu(mask,merge,base,vl);
+}
+
+vuint64m1_t
+test___riscv_vle64_v_u64m1_tumu(vbool64_t mask,vuint64m1_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m1_tumu(mask,merge,base,vl);
+}
+
+vuint64m2_t
+test___riscv_vle64_v_u64m2_tumu(vbool32_t mask,vuint64m2_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m2_tumu(mask,merge,base,vl);
+}
+
+vuint64m4_t
+test___riscv_vle64_v_u64m4_tumu(vbool16_t mask,vuint64m4_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m4_tumu(mask,merge,base,vl);
+}
+
+vuint64m8_t
+test___riscv_vle64_v_u64m8_tumu(vbool8_t mask,vuint64m8_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m8_tumu(mask,merge,base,vl);
+}
+
+vfloat64m1_t
+test___riscv_vle64_v_f64m1_tumu(vbool64_t mask,vfloat64m1_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m1_tumu(mask,merge,base,vl);
+}
+
+vfloat64m2_t
+test___riscv_vle64_v_f64m2_tumu(vbool32_t mask,vfloat64m2_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m2_tumu(mask,merge,base,vl);
+}
+
+vfloat64m4_t
+test___riscv_vle64_v_f64m4_tumu(vbool16_t mask,vfloat64m4_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m4_tumu(mask,merge,base,vl);
+}
+
+vfloat64m8_t
+test___riscv_vle64_v_f64m8_tumu(vbool8_t mask,vfloat64m8_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m8_tumu(mask,merge,base,vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tumu-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tumu-2.c
new file mode 100644
index 00000000000..d2df6c47e5b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tumu-2.c
@@ -0,0 +1,345 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t
+test___riscv_vle8_v_i8mf8_tumu(vbool64_t mask,vint8mf8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf8_tumu(mask,merge,base,31);
+}
+
+vint8mf4_t
+test___riscv_vle8_v_i8mf4_tumu(vbool32_t mask,vint8mf4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf4_tumu(mask,merge,base,31);
+}
+
+vint8mf2_t
+test___riscv_vle8_v_i8mf2_tumu(vbool16_t mask,vint8mf2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf2_tumu(mask,merge,base,31);
+}
+
+vint8m1_t
+test___riscv_vle8_v_i8m1_tumu(vbool8_t mask,vint8m1_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m1_tumu(mask,merge,base,31);
+}
+
+vint8m2_t
+test___riscv_vle8_v_i8m2_tumu(vbool4_t mask,vint8m2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m2_tumu(mask,merge,base,31);
+}
+
+vint8m4_t
+test___riscv_vle8_v_i8m4_tumu(vbool2_t mask,vint8m4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m4_tumu(mask,merge,base,31);
+}
+
+vint8m8_t
+test___riscv_vle8_v_i8m8_tumu(vbool1_t mask,vint8m8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m8_tumu(mask,merge,base,31);
+}
+
+vuint8mf8_t
+test___riscv_vle8_v_u8mf8_tumu(vbool64_t mask,vuint8mf8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf8_tumu(mask,merge,base,31);
+}
+
+vuint8mf4_t
+test___riscv_vle8_v_u8mf4_tumu(vbool32_t mask,vuint8mf4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf4_tumu(mask,merge,base,31);
+}
+
+vuint8mf2_t
+test___riscv_vle8_v_u8mf2_tumu(vbool16_t mask,vuint8mf2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf2_tumu(mask,merge,base,31);
+}
+
+vuint8m1_t
+test___riscv_vle8_v_u8m1_tumu(vbool8_t mask,vuint8m1_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m1_tumu(mask,merge,base,31);
+}
+
+vuint8m2_t
+test___riscv_vle8_v_u8m2_tumu(vbool4_t mask,vuint8m2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m2_tumu(mask,merge,base,31);
+}
+
+vuint8m4_t
+test___riscv_vle8_v_u8m4_tumu(vbool2_t mask,vuint8m4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m4_tumu(mask,merge,base,31);
+}
+
+vuint8m8_t
+test___riscv_vle8_v_u8m8_tumu(vbool1_t mask,vuint8m8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m8_tumu(mask,merge,base,31);
+}
+
+vint16mf4_t
+test___riscv_vle16_v_i16mf4_tumu(vbool64_t mask,vint16mf4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf4_tumu(mask,merge,base,31);
+}
+
+vint16mf2_t
+test___riscv_vle16_v_i16mf2_tumu(vbool32_t mask,vint16mf2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf2_tumu(mask,merge,base,31);
+}
+
+vint16m1_t
+test___riscv_vle16_v_i16m1_tumu(vbool16_t mask,vint16m1_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m1_tumu(mask,merge,base,31);
+}
+
+vint16m2_t
+test___riscv_vle16_v_i16m2_tumu(vbool8_t mask,vint16m2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m2_tumu(mask,merge,base,31);
+}
+
+vint16m4_t
+test___riscv_vle16_v_i16m4_tumu(vbool4_t mask,vint16m4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m4_tumu(mask,merge,base,31);
+}
+
+vint16m8_t
+test___riscv_vle16_v_i16m8_tumu(vbool2_t mask,vint16m8_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m8_tumu(mask,merge,base,31);
+}
+
+vuint16mf4_t
+test___riscv_vle16_v_u16mf4_tumu(vbool64_t mask,vuint16mf4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf4_tumu(mask,merge,base,31);
+}
+
+vuint16mf2_t
+test___riscv_vle16_v_u16mf2_tumu(vbool32_t mask,vuint16mf2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf2_tumu(mask,merge,base,31);
+}
+
+vuint16m1_t
+test___riscv_vle16_v_u16m1_tumu(vbool16_t mask,vuint16m1_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m1_tumu(mask,merge,base,31);
+}
+
+vuint16m2_t
+test___riscv_vle16_v_u16m2_tumu(vbool8_t mask,vuint16m2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m2_tumu(mask,merge,base,31);
+}
+
+vuint16m4_t
+test___riscv_vle16_v_u16m4_tumu(vbool4_t mask,vuint16m4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m4_tumu(mask,merge,base,31);
+}
+
+vuint16m8_t
+test___riscv_vle16_v_u16m8_tumu(vbool2_t mask,vuint16m8_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m8_tumu(mask,merge,base,31);
+}
+
+vint32mf2_t
+test___riscv_vle32_v_i32mf2_tumu(vbool64_t mask,vint32mf2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32mf2_tumu(mask,merge,base,31);
+}
+
+vint32m1_t
+test___riscv_vle32_v_i32m1_tumu(vbool32_t mask,vint32m1_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m1_tumu(mask,merge,base,31);
+}
+
+vint32m2_t
+test___riscv_vle32_v_i32m2_tumu(vbool16_t mask,vint32m2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m2_tumu(mask,merge,base,31);
+}
+
+vint32m4_t
+test___riscv_vle32_v_i32m4_tumu(vbool8_t mask,vint32m4_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m4_tumu(mask,merge,base,31);
+}
+
+vint32m8_t
+test___riscv_vle32_v_i32m8_tumu(vbool4_t mask,vint32m8_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m8_tumu(mask,merge,base,31);
+}
+
+vuint32mf2_t
+test___riscv_vle32_v_u32mf2_tumu(vbool64_t mask,vuint32mf2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32mf2_tumu(mask,merge,base,31);
+}
+
+vuint32m1_t
+test___riscv_vle32_v_u32m1_tumu(vbool32_t mask,vuint32m1_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m1_tumu(mask,merge,base,31);
+}
+
+vuint32m2_t
+test___riscv_vle32_v_u32m2_tumu(vbool16_t mask,vuint32m2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m2_tumu(mask,merge,base,31);
+}
+
+vuint32m4_t
+test___riscv_vle32_v_u32m4_tumu(vbool8_t mask,vuint32m4_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m4_tumu(mask,merge,base,31);
+}
+
+vuint32m8_t
+test___riscv_vle32_v_u32m8_tumu(vbool4_t mask,vuint32m8_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m8_tumu(mask,merge,base,31);
+}
+
+vfloat32mf2_t
+test___riscv_vle32_v_f32mf2_tumu(vbool64_t mask,vfloat32mf2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32mf2_tumu(mask,merge,base,31);
+}
+
+vfloat32m1_t
+test___riscv_vle32_v_f32m1_tumu(vbool32_t mask,vfloat32m1_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m1_tumu(mask,merge,base,31);
+}
+
+vfloat32m2_t
+test___riscv_vle32_v_f32m2_tumu(vbool16_t mask,vfloat32m2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m2_tumu(mask,merge,base,31);
+}
+
+vfloat32m4_t
+test___riscv_vle32_v_f32m4_tumu(vbool8_t mask,vfloat32m4_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m4_tumu(mask,merge,base,31);
+}
+
+vfloat32m8_t
+test___riscv_vle32_v_f32m8_tumu(vbool4_t mask,vfloat32m8_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m8_tumu(mask,merge,base,31);
+}
+
+vint64m1_t
+test___riscv_vle64_v_i64m1_tumu(vbool64_t mask,vint64m1_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m1_tumu(mask,merge,base,31);
+}
+
+vint64m2_t
+test___riscv_vle64_v_i64m2_tumu(vbool32_t mask,vint64m2_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m2_tumu(mask,merge,base,31);
+}
+
+vint64m4_t
+test___riscv_vle64_v_i64m4_tumu(vbool16_t mask,vint64m4_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m4_tumu(mask,merge,base,31);
+}
+
+vint64m8_t
+test___riscv_vle64_v_i64m8_tumu(vbool8_t mask,vint64m8_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m8_tumu(mask,merge,base,31);
+}
+
+vuint64m1_t
+test___riscv_vle64_v_u64m1_tumu(vbool64_t mask,vuint64m1_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m1_tumu(mask,merge,base,31);
+}
+
+vuint64m2_t
+test___riscv_vle64_v_u64m2_tumu(vbool32_t mask,vuint64m2_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m2_tumu(mask,merge,base,31);
+}
+
+vuint64m4_t
+test___riscv_vle64_v_u64m4_tumu(vbool16_t mask,vuint64m4_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m4_tumu(mask,merge,base,31);
+}
+
+vuint64m8_t
+test___riscv_vle64_v_u64m8_tumu(vbool8_t mask,vuint64m8_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m8_tumu(mask,merge,base,31);
+}
+
+vfloat64m1_t
+test___riscv_vle64_v_f64m1_tumu(vbool64_t mask,vfloat64m1_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m1_tumu(mask,merge,base,31);
+}
+
+vfloat64m2_t
+test___riscv_vle64_v_f64m2_tumu(vbool32_t mask,vfloat64m2_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m2_tumu(mask,merge,base,31);
+}
+
+vfloat64m4_t
+test___riscv_vle64_v_f64m4_tumu(vbool16_t mask,vfloat64m4_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m4_tumu(mask,merge,base,31);
+}
+
+vfloat64m8_t
+test___riscv_vle64_v_f64m8_tumu(vbool8_t mask,vfloat64m8_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m8_tumu(mask,merge,base,31);
+}
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*tu,\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*tu,\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*tu,\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*tu,\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tumu-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tumu-3.c
new file mode 100644
index 00000000000..828a5739759
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vle_tumu-3.c
@@ -0,0 +1,345 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t
+test___riscv_vle8_v_i8mf8_tumu(vbool64_t mask,vint8mf8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf8_tumu(mask,merge,base,32);
+}
+
+vint8mf4_t
+test___riscv_vle8_v_i8mf4_tumu(vbool32_t mask,vint8mf4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf4_tumu(mask,merge,base,32);
+}
+
+vint8mf2_t
+test___riscv_vle8_v_i8mf2_tumu(vbool16_t mask,vint8mf2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8mf2_tumu(mask,merge,base,32);
+}
+
+vint8m1_t
+test___riscv_vle8_v_i8m1_tumu(vbool8_t mask,vint8m1_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m1_tumu(mask,merge,base,32);
+}
+
+vint8m2_t
+test___riscv_vle8_v_i8m2_tumu(vbool4_t mask,vint8m2_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m2_tumu(mask,merge,base,32);
+}
+
+vint8m4_t
+test___riscv_vle8_v_i8m4_tumu(vbool2_t mask,vint8m4_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m4_tumu(mask,merge,base,32);
+}
+
+vint8m8_t
+test___riscv_vle8_v_i8m8_tumu(vbool1_t mask,vint8m8_t merge,int8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_i8m8_tumu(mask,merge,base,32);
+}
+
+vuint8mf8_t
+test___riscv_vle8_v_u8mf8_tumu(vbool64_t mask,vuint8mf8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf8_tumu(mask,merge,base,32);
+}
+
+vuint8mf4_t
+test___riscv_vle8_v_u8mf4_tumu(vbool32_t mask,vuint8mf4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf4_tumu(mask,merge,base,32);
+}
+
+vuint8mf2_t
+test___riscv_vle8_v_u8mf2_tumu(vbool16_t mask,vuint8mf2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8mf2_tumu(mask,merge,base,32);
+}
+
+vuint8m1_t
+test___riscv_vle8_v_u8m1_tumu(vbool8_t mask,vuint8m1_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m1_tumu(mask,merge,base,32);
+}
+
+vuint8m2_t
+test___riscv_vle8_v_u8m2_tumu(vbool4_t mask,vuint8m2_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m2_tumu(mask,merge,base,32);
+}
+
+vuint8m4_t
+test___riscv_vle8_v_u8m4_tumu(vbool2_t mask,vuint8m4_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m4_tumu(mask,merge,base,32);
+}
+
+vuint8m8_t
+test___riscv_vle8_v_u8m8_tumu(vbool1_t mask,vuint8m8_t merge,uint8_t* base,size_t vl)
+{
+  return __riscv_vle8_v_u8m8_tumu(mask,merge,base,32);
+}
+
+vint16mf4_t
+test___riscv_vle16_v_i16mf4_tumu(vbool64_t mask,vint16mf4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf4_tumu(mask,merge,base,32);
+}
+
+vint16mf2_t
+test___riscv_vle16_v_i16mf2_tumu(vbool32_t mask,vint16mf2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16mf2_tumu(mask,merge,base,32);
+}
+
+vint16m1_t
+test___riscv_vle16_v_i16m1_tumu(vbool16_t mask,vint16m1_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m1_tumu(mask,merge,base,32);
+}
+
+vint16m2_t
+test___riscv_vle16_v_i16m2_tumu(vbool8_t mask,vint16m2_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m2_tumu(mask,merge,base,32);
+}
+
+vint16m4_t
+test___riscv_vle16_v_i16m4_tumu(vbool4_t mask,vint16m4_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m4_tumu(mask,merge,base,32);
+}
+
+vint16m8_t
+test___riscv_vle16_v_i16m8_tumu(vbool2_t mask,vint16m8_t merge,int16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_i16m8_tumu(mask,merge,base,32);
+}
+
+vuint16mf4_t
+test___riscv_vle16_v_u16mf4_tumu(vbool64_t mask,vuint16mf4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf4_tumu(mask,merge,base,32);
+}
+
+vuint16mf2_t
+test___riscv_vle16_v_u16mf2_tumu(vbool32_t mask,vuint16mf2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16mf2_tumu(mask,merge,base,32);
+}
+
+vuint16m1_t
+test___riscv_vle16_v_u16m1_tumu(vbool16_t mask,vuint16m1_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m1_tumu(mask,merge,base,32);
+}
+
+vuint16m2_t
+test___riscv_vle16_v_u16m2_tumu(vbool8_t mask,vuint16m2_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m2_tumu(mask,merge,base,32);
+}
+
+vuint16m4_t
+test___riscv_vle16_v_u16m4_tumu(vbool4_t mask,vuint16m4_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m4_tumu(mask,merge,base,32);
+}
+
+vuint16m8_t
+test___riscv_vle16_v_u16m8_tumu(vbool2_t mask,vuint16m8_t merge,uint16_t* base,size_t vl)
+{
+  return __riscv_vle16_v_u16m8_tumu(mask,merge,base,32);
+}
+
+vint32mf2_t
+test___riscv_vle32_v_i32mf2_tumu(vbool64_t mask,vint32mf2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32mf2_tumu(mask,merge,base,32);
+}
+
+vint32m1_t
+test___riscv_vle32_v_i32m1_tumu(vbool32_t mask,vint32m1_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m1_tumu(mask,merge,base,32);
+}
+
+vint32m2_t
+test___riscv_vle32_v_i32m2_tumu(vbool16_t mask,vint32m2_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m2_tumu(mask,merge,base,32);
+}
+
+vint32m4_t
+test___riscv_vle32_v_i32m4_tumu(vbool8_t mask,vint32m4_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m4_tumu(mask,merge,base,32);
+}
+
+vint32m8_t
+test___riscv_vle32_v_i32m8_tumu(vbool4_t mask,vint32m8_t merge,int32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_i32m8_tumu(mask,merge,base,32);
+}
+
+vuint32mf2_t
+test___riscv_vle32_v_u32mf2_tumu(vbool64_t mask,vuint32mf2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32mf2_tumu(mask,merge,base,32);
+}
+
+vuint32m1_t
+test___riscv_vle32_v_u32m1_tumu(vbool32_t mask,vuint32m1_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m1_tumu(mask,merge,base,32);
+}
+
+vuint32m2_t
+test___riscv_vle32_v_u32m2_tumu(vbool16_t mask,vuint32m2_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m2_tumu(mask,merge,base,32);
+}
+
+vuint32m4_t
+test___riscv_vle32_v_u32m4_tumu(vbool8_t mask,vuint32m4_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m4_tumu(mask,merge,base,32);
+}
+
+vuint32m8_t
+test___riscv_vle32_v_u32m8_tumu(vbool4_t mask,vuint32m8_t merge,uint32_t* base,size_t vl)
+{
+  return __riscv_vle32_v_u32m8_tumu(mask,merge,base,32);
+}
+
+vfloat32mf2_t
+test___riscv_vle32_v_f32mf2_tumu(vbool64_t mask,vfloat32mf2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32mf2_tumu(mask,merge,base,32);
+}
+
+vfloat32m1_t
+test___riscv_vle32_v_f32m1_tumu(vbool32_t mask,vfloat32m1_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m1_tumu(mask,merge,base,32);
+}
+
+vfloat32m2_t
+test___riscv_vle32_v_f32m2_tumu(vbool16_t mask,vfloat32m2_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m2_tumu(mask,merge,base,32);
+}
+
+vfloat32m4_t
+test___riscv_vle32_v_f32m4_tumu(vbool8_t mask,vfloat32m4_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m4_tumu(mask,merge,base,32);
+}
+
+vfloat32m8_t
+test___riscv_vle32_v_f32m8_tumu(vbool4_t mask,vfloat32m8_t merge,float* base,size_t vl)
+{
+  return __riscv_vle32_v_f32m8_tumu(mask,merge,base,32);
+}
+
+vint64m1_t
+test___riscv_vle64_v_i64m1_tumu(vbool64_t mask,vint64m1_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m1_tumu(mask,merge,base,32);
+}
+
+vint64m2_t
+test___riscv_vle64_v_i64m2_tumu(vbool32_t mask,vint64m2_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m2_tumu(mask,merge,base,32);
+}
+
+vint64m4_t
+test___riscv_vle64_v_i64m4_tumu(vbool16_t mask,vint64m4_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m4_tumu(mask,merge,base,32);
+}
+
+vint64m8_t
+test___riscv_vle64_v_i64m8_tumu(vbool8_t mask,vint64m8_t merge,int64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_i64m8_tumu(mask,merge,base,32);
+}
+
+vuint64m1_t
+test___riscv_vle64_v_u64m1_tumu(vbool64_t mask,vuint64m1_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m1_tumu(mask,merge,base,32);
+}
+
+vuint64m2_t
+test___riscv_vle64_v_u64m2_tumu(vbool32_t mask,vuint64m2_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m2_tumu(mask,merge,base,32);
+}
+
+vuint64m4_t
+test___riscv_vle64_v_u64m4_tumu(vbool16_t mask,vuint64m4_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m4_tumu(mask,merge,base,32);
+}
+
+vuint64m8_t
+test___riscv_vle64_v_u64m8_tumu(vbool8_t mask,vuint64m8_t merge,uint64_t* base,size_t vl)
+{
+  return __riscv_vle64_v_u64m8_tumu(mask,merge,base,32);
+}
+
+vfloat64m1_t
+test___riscv_vle64_v_f64m1_tumu(vbool64_t mask,vfloat64m1_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m1_tumu(mask,merge,base,32);
+}
+
+vfloat64m2_t
+test___riscv_vle64_v_f64m2_tumu(vbool32_t mask,vfloat64m2_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m2_tumu(mask,merge,base,32);
+}
+
+vfloat64m4_t
+test___riscv_vle64_v_f64m4_tumu(vbool16_t mask,vfloat64m4_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m4_tumu(mask,merge,base,32);
+}
+
+vfloat64m8_t
+test___riscv_vle64_v_f64m8_tumu(vbool8_t mask,vfloat64m8_t merge,double* base,size_t vl)
+{
+  return __riscv_vle64_v_f64m8_tumu(mask,merge,base,32);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*mu\s+vle8\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*mu\s+vle16\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*mu\s+vle32\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*mu\s+vle64\.v\s+v[0-9]+,\s*0\([a-x0-9]+\),\s*v0.t} 3 } } */
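A note for readers skimming the assembler checks: the three variants of each
file differ only in the AVL they pass to the intrinsic.  The -1 variants pass
the runtime `vl` argument through, so the tests expect `vsetvli` with a scalar
register; the -2 variants pass the constant 31, the largest value that fits
vsetivli's 5-bit unsigned immediate, so the tests expect `vsetivli zero,31,...`;
the -3 variants pass 32, which exceeds that immediate range and forces
`vsetvli` with a register again.  Likewise, the `_tum` checks accept either
mask-policy encoding (`m[au]`), since mask-agnostic behaviour may legally be
implemented as undisturbed, while the `_tumu` checks require `mu` exactly.

As a rough sketch of what the tumu tests exercise (illustrative only, not part
of the patch; the function name is made up), a tail-undisturbed,
mask-undisturbed load keeps the inactive and tail elements of the merge
operand:

  /* Illustrative sketch only -- not part of the patch.  Elements where
     MASK is 0, and elements past the AVL, keep their old values from
     MERGE; active elements are loaded from BASE.  */
  #include "riscv_vector.h"

  vint32m1_t
  sketch_tumu_load (vbool32_t mask, vint32m1_t merge, int32_t *base)
  {
    /* An AVL of 31 fits vsetivli's 5-bit unsigned immediate, so the
       compiler can emit "vsetivli zero,31,e32,m1,tu,mu"; an AVL of 32
       would not fit and would force "vsetvli" with a register.  */
    return __riscv_vle32_v_i32m1_tumu (mask, merge, base, 31);
  }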