[15/21] Add integer intrinsics

Message ID 20220531085012.269719-16-juzhe.zhong@rivai.ai
State Committed
Series *** Add RVV (RISC-V 'V' Extension) support ***

Commit Message

juzhe.zhong@rivai.ai May 31, 2022, 8:50 a.m. UTC
  From: zhongjuzhe <juzhe.zhong@rivai.ai>

gcc/ChangeLog:

        * config/riscv/riscv-protos.h (emit_op6, emit_op7): New function declarations.
        * config/riscv/riscv-vector-builtins-functions.cc (basic_alu::assemble_name): New function.
        (basic_alu::get_return_type): New function.
        (unop::get_argument_types): New function.
        (unop::can_be_overloaded_p): New function.
        (binop::get_argument_types): New function.
        (binop::can_be_overloaded_p): New function.
        (wbinop::assemble_name): New function.
        (ternop::get_argument_types): New function.
        (ternop::can_be_overloaded_p): New function.
        (vadd::expand): New function.
        (vsub::expand): New function.
        (vrsub::expand): New function.
        (vneg::expand): New function.
        (vwadd::expand): New function.
        (vwsub::expand): New function.
        (vwaddu::expand): New function.
        (vwsubu::expand): New function.
        (vwcvt::expand): New function.
        (vwcvtu::expand): New function.
        (vsext::assemble_name): New function.
        (vsext::expand): New function.
        (vzext::assemble_name): New function.
        (vzext::expand): New function.
        (vadc::expand): New function.
        (vsbc::expand): New function.
        (vmadc::assemble_name): New function.
        (vmadc::expand): New function.
        (vmsbc::assemble_name): New function.
        (vmsbc::expand): New function.
        (vand::expand): New function.
        (vor::expand): New function.
        (vxor::expand): New function.
        (vnot::expand): New function.
        (vshift::get_argument_types): New function.
        (vsll::expand): New function.
        (vsrl::expand): New function.
        (vsra::expand): New function.
        (vnsrl::expand): New function.
        (vnsra::expand): New function.
        (vncvt::expand): New function.
        (vcmp::assemble_name): New function.
        (vmseq::expand): New function.
        (vmsne::expand): New function.
        (vmslt::expand): New function.
        (vmsltu::expand): New function.
        (vmsgt::expand): New function.
        (vmsgtu::expand): New function.
        (vmsle::expand): New function.
        (vmsleu::expand): New function.
        (vmsge::expand): New function.
        (vmsgeu::expand): New function.
        (vmin::expand): New function.
        (vminu::expand): New function.
        (vmax::expand): New function.
        (vmaxu::expand): New function.
        (vmul::expand): New function.
        (vmulh::expand): New function.
        (vmulhu::expand): New function.
        (vmulhsu::expand): New function.
        (vdiv::expand): New function.
        (vdivu::expand): New function.
        (vrem::expand): New function.
        (vremu::expand): New function.
        (vwmul::expand): New function.
        (vwmulu::expand): New function.
        (vwmulsu::expand): New function.
        (vmacc::expand): New function.
        (vnmsac::expand): New function.
        (vmadd::expand): New function.
        (vnmsub::expand): New function.
        (vwmacc::expand): New function.
        (vwmaccu::expand): New function.
        (vwmaccsu::expand): New function.
        (vwmaccus::expand): New function.
        (vmerge::get_position_of_dest_arg): New function.
        (vmerge::expand): New function.
        (vmv::get_argument_types): New function.
        (vmv::can_be_overloaded_p): New function.
        (vmv::expand): New function.
        * config/riscv/riscv-vector-builtins-functions.def (vadd): New macro definition.
        (vsub): New macro definition.
        (vrsub): New macro definition.
        (vneg): New macro definition.
        (vwadd): New macro definition.
        (vwsub): New macro definition.
        (vwaddu): New macro definition.
        (vwsubu): New macro definition.
        (vwcvt): New macro definition.
        (vwcvtu): New macro definition.
        (vsext): New macro definition.
        (vzext): New macro definition.
        (vadc): New macro definition.
        (vsbc): New macro definition.
        (vmadc): New macro definition.
        (vmsbc): New macro definition.
        (vand): New macro definition.
        (vor): New macro definition.
        (vxor): New macro definition.
        (vnot): New macro definition.
        (vsll): New macro definition.
        (vsrl): New macro definition.
        (vsra): New macro definition.
        (vnsrl): New macro definition.
        (vnsra): New macro definition.
        (vncvt): New macro definition.
        (vmseq): New macro definition.
        (vmsne): New macro definition.
        (vmslt): New macro definition.
        (vmsltu): New macro definition.
        (vmsgt): New macro definition.
        (vmsgtu): New macro definition.
        (vmsle): New macro definition.
        (vmsleu): New macro definition.
        (vmsge): New macro definition.
        (vmsgeu): New macro definition.
        (vmin): New macro definition.
        (vminu): New macro definition.
        (vmax): New macro definition.
        (vmaxu): New macro definition.
        (vmul): New macro definition.
        (vmulh): New macro definition.
        (vmulhu): New macro definition.
        (vmulhsu): New macro definition.
        (vdiv): New macro definition.
        (vdivu): New macro definition.
        (vrem): New macro definition.
        (vremu): New macro definition.
        (vwmul): New macro definition.
        (vwmulu): New macro definition.
        (vwmulsu): New macro definition.
        (vmacc): New macro definition.
        (vnmsac): New macro definition.
        (vmadd): New macro definition.
        (vnmsub): New macro definition.
        (vwmacc): New macro definition.
        (vwmaccu): New macro definition.
        (vwmaccsu): New macro definition.
        (vwmaccus): New macro definition.
        (vmerge): New macro definition.
        (vmv): New macro definition.
        * config/riscv/riscv-vector-builtins-functions.h (class basic_alu): New class.
        (class unop): New class.
        (class binop): New class.
        (class wbinop): New class.
        (class ternop): New class.
        (class vadd): New class.
        (class vsub): New class.
        (class vrsub): New class.
        (class vneg): New class.
        (class vwadd): New class.
        (class vwsub): New class.
        (class vwaddu): New class.
        (class vwsubu): New class.
        (class vwcvt): New class.
        (class vwcvtu): New class.
        (class vsext): New class.
        (class vzext): New class.
        (class vadc): New class.
        (class vsbc): New class.
        (class vmadc): New class.
        (class vmsbc): New class.
        (class vand): New class.
        (class vor): New class.
        (class vxor): New class.
        (class vnot): New class.
        (class vshift): New class.
        (class vsll): New class.
        (class vsrl): New class.
        (class vsra): New class.
        (class vnsrl): New class.
        (class vnsra): New class.
        (class vncvt): New class.
        (class vcmp): New class.
        (class vmseq): New class.
        (class vmsne): New class.
        (class vmslt): New class.
        (class vmsltu): New class.
        (class vmsgt): New class.
        (class vmsgtu): New class.
        (class vmsle): New class.
        (class vmsleu): New class.
        (class vmsge): New class.
        (class vmsgeu): New class.
        (class vmin): New class.
        (class vminu): New class.
        (class vmax): New class.
        (class vmaxu): New class.
        (class vmul): New class.
        (class vmulh): New class.
        (class vmulhu): New class.
        (class vmulhsu): New class.
        (class vdiv): New class.
        (class vdivu): New class.
        (class vrem): New class.
        (class vremu): New class.
        (class vwmul): New class.
        (class vwmulu): New class.
        (class vwmulsu): New class.
        (class vmacc): New class.
        (class vnmsac): New class.
        (class vmadd): New class.
        (class vnmsub): New class.
        (class vwmacc): New class.
        (class vwmaccu): New class.
        (class vwmaccsu): New class.
        (class vwmaccus): New class.
        (class vmerge): New class.
        (class vmv): New class.
        * config/riscv/riscv-vector.cc (modify_operands): Fix for UNSPEC_VSSUBU.
        (emit_op6): New function.
        (emit_op7): New function.
        * config/riscv/riscv.cc (riscv_print_operand): Add %v and %V assembly support.
        (riscv_modes_tieable_p): Adjust for RVV modes.
        * config/riscv/riscv.md: Add more supporting code.
        * config/riscv/vector-iterators.md: Fix iterators.
        * config/riscv/vector.md (@v<vxoptab><mode>_vx): New pattern.
        (@v<vxoptab><mode>_vxm): New pattern.
        (@vms<optab><mode>_vx): New pattern.
        (@vadd<mode>_vv): New pattern.
        (@vsub<mode>_vv): New pattern.
        (@vadd<mode>_vx_internal): New pattern.
        (@vadd<mode>_vx_32bit): New pattern.
        (@vsub<mode>_vx_internal): New pattern.
        (@vsub<mode>_vx_32bit): New pattern.
        (@vrsub<mode>_vx_internal): New pattern.
        (@vrsub<mode>_vx_32bit): New pattern.
        (@vneg<mode>_v): New pattern.
        (@vw<plus_minus:optab><u><vw>_vv): New pattern.
        (@vw<plus_minus:optab><u><vw>_vx): New pattern.
        (@vw<plus_minus:optab><u><vw>_wv): New pattern.
        (@vw<plus_minus:optab><u><vw>_wx): New pattern.
        (@vwcvt<u><vw>_x_x_v): New pattern.
        (@v<sz>ext<vw>_vf2): New pattern.
        (@v<sz>ext<vqw>_vf4): New pattern.
        (@v<sz>ext<vow>_vf8): New pattern.
        (@vadc<mode>_vvm): New pattern.
        (@vsbc<mode>_vvm): New pattern.
        (@vadc<mode>_vxm_internal): New pattern.
        (@vadc<mode>_vxm_32bit): New pattern.
        (@vsbc<mode>_vxm_internal): New pattern.
        (@vsbc<mode>_vxm_32bit): New pattern.
        (@vmadc<mode>_vvm): New pattern.
        (@vmsbc<mode>_vvm): New pattern.
        (@vmadc<mode>_vxm_internal): New pattern.
        (@vmadc<mode>_vxm_32bit): New pattern.
        (@vmsbc<mode>_vxm_internal): New pattern.
        (@vmsbc<mode>_vxm_32bit): New pattern.
        (@vmadc<mode>_vv): New pattern.
        (@vmsbc<mode>_vv): New pattern.
        (@vmadc<mode>_vx_internal): New pattern.
        (@vmadc<mode>_vx_32bit): New pattern.
        (@vmsbc<mode>_vx_internal): New pattern.
        (@vmsbc<mode>_vx_32bit): New pattern.
        (@v<optab><mode>_vv): New pattern.
        (@v<optab><mode>_vx_internal): New pattern.
        (@v<optab><mode>_vx_32bit): New pattern.
        (@vnot<mode>_v): New pattern.
        (@v<optab><mode>_vx): New pattern.
        (@vn<optab><mode>_wv): New pattern.
        (@vn<optab><mode>_wx): New pattern.
        (@vncvt<mode>_x_x_w): New pattern.
        (@vms<optab><mode>_vv): New pattern.
        (@vms<optab><mode>_vx_internal): New pattern.
        (@vms<optab><mode>_vx_32bit): New pattern.
        (*vms<optab><mode>_vx): New pattern.
        (*vms<optab><mode>_vx_32bit): New pattern.
        (@vmul<mode>_vv): New pattern.
        (@vmul<mode>_vx_internal): New pattern.
        (@vmul<mode>_vx_32bit): New pattern.
        (@vmulh<u><mode>_vv): New pattern.
        (@vmulh<u><mode>_vx_internal): New pattern.
        (@vmulh<u><mode>_vx_32bit): New pattern.
        (@vmulhsu<mode>_vv): New pattern.
        (@vmulhsu<mode>_vx_internal): New pattern.
        (@vmulhsu<mode>_vx_32bit): New pattern.
        (@vwmul<u><vw>_vv): New pattern.
        (@vwmul<u><vw>_vx): New pattern.
        (@vwmulsu<vw>_vv): New pattern.
        (@vwmulsu<vw>_vx): New pattern.
        (@v<imac><mode>_vv): New pattern.
        (@v<imac><mode>_vx_internal): New pattern.
        (@v<imac><mode>_vx_32bit): New pattern.
        (@vwmacc<u><vw>_vv): New pattern.
        (@vwmacc<u><vw>_vx): New pattern.
        (@vwmaccsu<vw>_vv): New pattern.
        (@vwmaccsu<vw>_vx): New pattern.
        (@vwmaccus<vw>_vx): New pattern.
        (@vmerge<mode>_vvm): New pattern.
        (@vmerge<mode>_vxm_internal): New pattern.
        (@vmerge<mode>_vxm_32bit): New pattern.
        (@vmv<mode>_v_v): New pattern.
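
For reference, a hypothetical usage sketch of the new integer intrinsics
(not part of the patch).  Intrinsic names assume the v0.x
rvv-intrinsic-doc naming this series follows, with vsetvl/vle32/vse32
taken from the earlier load/store patches in the series; the exact
prototypes are generated from riscv-vector-builtins-functions.def:

#include <stdint.h>
#include <riscv_vector.h>

/* Hypothetical example: out[i] = a[i] + b[i] + 1, strip-mined with
   vsetvl.  */
void
add_bias (const int32_t *a, const int32_t *b, int32_t *out, size_t n)
{
  for (size_t vl; n > 0; n -= vl, a += vl, b += vl, out += vl)
    {
      vl = vsetvl_e32m1 (n);                        /* vsetvli */
      vint32m1_t va = vle32_v_i32m1 (a, vl);
      vint32m1_t vb = vle32_v_i32m1 (b, vl);
      vint32m1_t vc = vadd_vv_i32m1 (va, vb, vl);   /* vadd.vv */
      vc = vadd_vx_i32m1 (vc, 1, vl);               /* vadd.vx */
      vse32_v_i32m1 (out, vc, vl);
    }
}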

---
 gcc/config/riscv/riscv-protos.h               |   24 +
 .../riscv/riscv-vector-builtins-functions.cc  |  991 +++++++
 .../riscv/riscv-vector-builtins-functions.def |   98 +
 .../riscv/riscv-vector-builtins-functions.h   |  701 +++++
 gcc/config/riscv/riscv-vector.cc              |   41 +-
 gcc/config/riscv/riscv.cc                     |   53 +
 gcc/config/riscv/riscv.md                     |   62 +-
 gcc/config/riscv/vector-iterators.md          |    3 -
 gcc/config/riscv/vector.md                    | 2575 ++++++++++++++++-
 9 files changed, 4502 insertions(+), 46 deletions(-)
  

Patch

diff --git a/gcc/config/riscv/riscv-protos.h b/gcc/config/riscv/riscv-protos.h
index fd8906e47de..c47ab8e079d 100644
--- a/gcc/config/riscv/riscv-protos.h
+++ b/gcc/config/riscv/riscv-protos.h
@@ -145,6 +145,30 @@  emit_op5 (
   bool (*imm_p) (rtx),
   int i, bool reverse
 );
+extern void
+emit_op6 (
+  unsigned int unspec,
+  machine_mode Vmode, machine_mode VSImode, machine_mode VMSImode,
+  machine_mode VSUBmode,
+  rtx *operands,
+  rtx (*gen_vx) (rtx, rtx, rtx, rtx, rtx, rtx),
+  rtx (*gen_vx_32bit) (rtx, rtx, rtx, rtx, rtx, rtx),
+  rtx (*gen_vv) (rtx, rtx, rtx, rtx, rtx, rtx),
+  bool (*imm_p) (rtx),
+  int i, bool reverse
+);
+extern void
+emit_op7 (
+  unsigned int unspec,
+  machine_mode Vmode, machine_mode VSImode, machine_mode VMSImode,
+  machine_mode VSUBmode,
+  rtx *operands,
+  rtx (*gen_vx) (rtx, rtx, rtx, rtx, rtx, rtx, rtx),
+  rtx (*gen_vx_32bit) (rtx, rtx, rtx, rtx, rtx, rtx, rtx),
+  rtx (*gen_vv) (rtx, rtx, rtx, rtx, rtx, rtx, rtx),
+  bool (*imm_p) (rtx),
+  int i, bool reverse
+);
 
 /* We classify builtin types into two classes:
    1. General builtin class which is using the
diff --git a/gcc/config/riscv/riscv-vector-builtins-functions.cc b/gcc/config/riscv/riscv-vector-builtins-functions.cc
index 0726465f146..6e0fd0b3570 100644
--- a/gcc/config/riscv/riscv-vector-builtins-functions.cc
+++ b/gcc/config/riscv/riscv-vector-builtins-functions.cc
@@ -2072,6 +2072,997 @@  vleff::expand (const function_instance &instance, tree exp, rtx target) const
   return expand_builtin_insn (icode, exp, target, instance);
 }
 
+/* A function implementation for basic_alu functions.  */
+char *
+basic_alu::assemble_name (function_instance &instance)
+{
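+  /* Assemble the overloadable intrinsic name: the base name, a "_v" or
+     "_x" suffix for the vmv/vwcvt-style operations, and the predication
+     suffix.  */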
+  intrinsic_rename (instance, 0);
+  if (this->can_be_overloaded_p (instance))
+    {
+      append_name (instance.get_base_name ());
+      if (instance.get_operation () == OP_v_x ||
+          instance.get_operation () == OP_v_v ||
+          instance.get_operation () == OP_v_f)
+        append_name ("_v");
+      if (instance.get_operation () == OP_x_x_v ||
+          instance.get_operation () == OP_x_x_w)
+        append_name ("_x");
+      append_name (get_pred_str (instance.get_pred (), true));
+      return finish_name ();
+    }
+  return nullptr;
+}
+
+tree
+basic_alu::get_return_type (const function_instance &instance) const
+{
+  return get_dt_t_with_index (instance, 0);
+}
+
+/* A function implementation for unary functions.  */
+void
+unop::get_argument_types (const function_instance &instance,
+                          vec<tree> &argument_types) const
+{
+  argument_types.quick_push (get_dt_t_with_index (instance, 1));
+}
+
+bool
+unop::can_be_overloaded_p (const function_instance &instance) const
+{
+  if (instance.get_pred () == PRED_none)
+    return false;
+
+  return true;
+}
+
+/* A function implementation for binary functions.  */
+void
+binop::get_argument_types (const function_instance &instance,
+                           vec<tree> &argument_types) const
+{
+  for (unsigned int i = 1; i < instance.get_arg_pattern ().arg_len; i++)
+    {
+      if (i == 2 && vector_scalar_operation_p (instance.get_operation ()))
+        {
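+          /* For the vector-scalar (vx) forms, the second source operand
+             is a scalar of the element type rather than a vector.  */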
+          machine_mode mode = GET_MODE_INNER (instance.get_arg_pattern ().arg_list[i]);
+          bool unsigned_p = is_dt_unsigned (instance.get_data_type_list ()[i]);
+          argument_types.quick_push (get_dt_t (mode, unsigned_p));
+        }
+      else
+        argument_types.quick_push (get_dt_t_with_index (instance, i));
+    }
+}
+
+bool
+binop::can_be_overloaded_p (const function_instance &) const
+{
+  return true;
+}
+
+/* A function implementation for widen binary functions.  */
+char *
+wbinop::assemble_name (function_instance &instance)
+{
+  intrinsic_rename (instance, 0);
+  append_name (instance.get_base_name ());
+  append_name (get_operation_str (instance.get_operation ()));
+  append_name (get_pred_str (instance.get_pred (), true));
+  return finish_name ();
+}
+
+/* A function implementation for ternary functions.  */
+void
+ternop::get_argument_types (const function_instance &instance,
+                            vec<tree> &argument_types) const
+{
+  if (vector_scalar_operation_p (instance.get_operation ()))
+    {
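+      /* For the vector-scalar forms of the multiply-add family, the
+         first source operand is a scalar of the element type.  */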
+      machine_mode mode = GET_MODE_INNER (instance.get_arg_pattern ().arg_list[1]);
+      bool unsigned_p = is_dt_unsigned (instance.get_data_type_list ()[1]);
+      argument_types.quick_push (get_dt_t (mode, unsigned_p));
+    }
+  else
+    argument_types.quick_push (get_dt_t_with_index (instance, 1));
+  for (unsigned int i = 2; i < instance.get_arg_pattern ().arg_len; i++)
+    argument_types.quick_push (get_dt_t_with_index (instance, i));
+}
+
+bool
+ternop::can_be_overloaded_p (const function_instance &) const
+{
+  return true;
+}
+
+/* A function implementation for vadd functions.  */
+rtx
+vadd::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vadd_vv (mode);
+  else
+    icode = code_for_v_vx (UNSPEC_VADD, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+rtx
+vsub::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vsub_vv (mode);
+  else
+    icode = code_for_v_vx (UNSPEC_VSUB, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vrsub functions.  */
+rtx
+vrsub::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode = code_for_v_vx (UNSPEC_VRSUB, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vneg functions.  */
+rtx
+vneg::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode = code_for_vneg_v (mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vwadd functions.  */
+rtx
+vwadd::expand (const function_instance &instance, tree exp,
+                     rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[2];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vw_vv (PLUS, SIGN_EXTEND, mode);
+  else if (instance.get_operation () == OP_vx)
+    icode = code_for_vw_vx (PLUS, SIGN_EXTEND, mode);
+  else if (instance.get_operation () == OP_wv)
+    icode = code_for_vw_wv (PLUS, SIGN_EXTEND, mode);
+  else
+    icode = code_for_vw_wx (PLUS, SIGN_EXTEND, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vwsub functions.  */
+rtx
+vwsub::expand (const function_instance &instance, tree exp,
+                     rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[2];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vw_vv (MINUS, SIGN_EXTEND, mode);
+  else if (instance.get_operation () == OP_vx)
+    icode = code_for_vw_vx (MINUS, SIGN_EXTEND, mode);
+  else if (instance.get_operation () == OP_wv)
+    icode = code_for_vw_wv (MINUS, SIGN_EXTEND, mode);
+  else
+    icode = code_for_vw_wx (MINUS, SIGN_EXTEND, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vwaddu functions.  */
+rtx
+vwaddu::expand (const function_instance &instance, tree exp,
+                     rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[2];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vw_vv (PLUS, ZERO_EXTEND, mode);
+  else if (instance.get_operation () == OP_vx)
+    icode = code_for_vw_vx (PLUS, ZERO_EXTEND, mode);
+  else if (instance.get_operation () == OP_wv)
+    icode = code_for_vw_wv (PLUS, ZERO_EXTEND, mode);
+  else
+    icode = code_for_vw_wx (PLUS, ZERO_EXTEND, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vwsubu functions.  */
+rtx
+vwsubu::expand (const function_instance &instance, tree exp,
+                     rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[2];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vw_vv (MINUS, ZERO_EXTEND, mode);
+  else if (instance.get_operation () == OP_vx)
+    icode = code_for_vw_vx (MINUS, ZERO_EXTEND, mode);
+  else if (instance.get_operation () == OP_wv)
+    icode = code_for_vw_wv (MINUS, ZERO_EXTEND, mode);
+  else
+    icode = code_for_vw_wx (MINUS, ZERO_EXTEND, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vwcvt functions.  */
+rtx
+vwcvt::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+  enum insn_code icode = code_for_vwcvt_x_x_v (SIGN_EXTEND, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vwcvtu functions.  */
+rtx
+vwcvtu::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+  enum insn_code icode = code_for_vwcvt_x_x_v (ZERO_EXTEND, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vsext functions.  */
+char *
+vsext::assemble_name (function_instance &instance)
+{
+  intrinsic_rename (instance, 0);
+  append_name (instance.get_base_name ());
+  append_name (get_operation_str (instance.get_operation ()));
+  append_name (get_pred_str (instance.get_pred (), true));
+  return finish_name ();
+}
+
+rtx
+vsext::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vf2)
+    icode = code_for_vext_vf2 (SIGN_EXTEND, mode);
+  else if (instance.get_operation () == OP_vf4)
+    icode = code_for_vext_vf4 (SIGN_EXTEND, mode);
+  else
+    icode = code_for_vext_vf8 (SIGN_EXTEND, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vzext functions.  */
+char *
+vzext::assemble_name (function_instance &instance)
+{
+  intrinsic_rename (instance, 0);
+  append_name (instance.get_base_name ());
+  append_name (get_operation_str (instance.get_operation ()));
+  append_name (get_pred_str (instance.get_pred (), true));
+  return finish_name ();
+}
+
+rtx
+vzext::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vf2)
+    icode = code_for_vext_vf2 (ZERO_EXTEND, mode);
+  else if (instance.get_operation () == OP_vf4)
+    icode = code_for_vext_vf4 (ZERO_EXTEND, mode);
+  else
+    icode = code_for_vext_vf8 (ZERO_EXTEND, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vadc functions.  */
+rtx
+vadc::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vvm)
+    icode = code_for_vadc_vvm (mode);
+  else
+    icode = code_for_v_vxm (UNSPEC_VADC, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vsbc functions.  */
+rtx
+vsbc::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vvm)
+    icode = code_for_vsbc_vvm (mode);
+  else
+    icode = code_for_v_vxm (UNSPEC_VSBC, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmadc functions.  */
+char *
+vmadc::assemble_name (function_instance &instance)
+{
+  intrinsic_rename (instance, 0, 1);
+  append_name (instance.get_base_name ());
+  return finish_name ();
+}
+
+rtx
+vmadc::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vvm)
+    icode = code_for_vmadc_vvm (mode);
+  else if (instance.get_operation () == OP_vv)
+    icode = code_for_vmadc_vv (mode);
+  else if (instance.get_operation () == OP_vxm)
+    icode = code_for_v_vxm (UNSPEC_VMADC, mode);
+  else
+    icode = code_for_v_vx (UNSPEC_VMADC, mode);
+
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmsbc functions.  */
+char *
+vmsbc::assemble_name (function_instance &instance)
+{
+  intrinsic_rename (instance, 0, 1);
+  append_name (instance.get_base_name ());
+  return finish_name ();
+}
+
+rtx
+vmsbc::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vvm)
+    icode = code_for_vmsbc_vvm (mode);
+  else if (instance.get_operation () == OP_vv)
+    icode = code_for_vmsbc_vv (mode);
+  else if (instance.get_operation () == OP_vxm)
+    icode = code_for_v_vxm (UNSPEC_VMSBC, mode);
+  else
+    icode = code_for_v_vx (UNSPEC_VMSBC, mode);
+
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vand functions.  */
+rtx
+vand::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_v_vv (AND, mode);
+  else
+    icode = code_for_v_vx (UNSPEC_VAND, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vor functions.  */
+rtx
+vor::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_v_vv (IOR, mode);
+  else
+    icode = code_for_v_vx (UNSPEC_VIOX, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vxor functions.  */
+rtx
+vxor::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_v_vv (XOR, mode);
+  else
+    icode = code_for_v_vx (UNSPEC_VXOR, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vnot functions.  */
+rtx
+vnot::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+
+  enum insn_code icode = code_for_vnot_v (mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vshift functions.  */
+void
+vshift::get_argument_types (const function_instance &instance,
+                            vec<tree> &argument_types) const
+{
+  argument_types.quick_push (get_dt_t_with_index (instance, 1));
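+  /* The shift amount of the vx/wx forms is a plain size_t scalar.  */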
+  if (instance.get_operation () == OP_vx || instance.get_operation () == OP_wx)
+    argument_types.quick_push (size_type_node);
+  else
+    argument_types.quick_push (get_dt_t_with_index (instance, 2));
+}
+
+/* A function implementation for vsll functions.  */
+rtx
+vsll::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_v_vv (ASHIFT, mode);
+  else
+    icode = code_for_v_vx (ASHIFT, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vsrl functions.  */
+rtx
+vsrl::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_v_vv (LSHIFTRT, mode);
+  else
+    icode = code_for_v_vx (LSHIFTRT, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vsra functions.  */
+rtx
+vsra::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_v_vv (ASHIFTRT, mode);
+  else
+    icode = code_for_v_vx (ASHIFTRT, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vnsrl functions.  */
+rtx
+vnsrl::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[0];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_wv)
+    icode = code_for_vn_wv (LSHIFTRT, mode);
+  else
+    icode = code_for_vn_wx (LSHIFTRT, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vnsra functions.  */
+rtx
+vnsra::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[0];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_wv)
+    icode = code_for_vn_wv (ASHIFTRT, mode);
+  else
+    icode = code_for_vn_wx (ASHIFTRT, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vncvt functions.  */
+rtx
+vncvt::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode = code_for_vncvt_x_x_w (mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vcmp functions.  */
+char *
+vcmp::assemble_name (function_instance &instance)
+{
+  intrinsic_rename (instance, 0, 1);
+  append_name (instance.get_base_name ());
+  append_name (get_pred_str (instance.get_pred (), true));
+  return finish_name ();
+}
+
+/* A function implementation for vmseq functions.  */
+rtx
+vmseq::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vms_vv (EQ, mode);
+  else
+    icode = code_for_vms_vx (EQ, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmsne functions.  */
+rtx
+vmsne::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vms_vv (NE, mode);
+  else
+    icode = code_for_vms_vx (NE, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmslt functions.  */
+rtx
+vmslt::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vms_vv (LT, mode);
+  else
+    icode = code_for_vms_vx (LT, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmsltu functions.  */
+rtx
+vmsltu::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vms_vv (LTU, mode);
+  else
+    icode = code_for_vms_vx (LTU, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmsgt functions.  */
+rtx
+vmsgt::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vms_vv (GT, mode);
+  else
+    icode = code_for_vms_vx (GT, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmsgtu functions.  */
+rtx
+vmsgtu::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vms_vv (GTU, mode);
+  else
+    icode = code_for_vms_vx (GTU, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmsle functions.  */
+rtx
+vmsle::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vms_vv (LE, mode);
+  else
+    icode = code_for_vms_vx (LE, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmsleu functions.  */
+rtx
+vmsleu::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vms_vv (LEU, mode);
+  else
+    icode = code_for_vms_vx (LEU, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmsge functions.  */
+rtx
+vmsge::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vms_vv (GE, mode);
+  else
+    icode = code_for_vms_vx (GE, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmsgeu functions.  */
+rtx
+vmsgeu::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vms_vv (GEU, mode);
+  else
+    icode = code_for_vms_vx (GEU, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmin functions.  */
+rtx
+vmin::expand (const function_instance &instance, tree exp,
+                   rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_v_vv (SMIN, mode);
+  else
+    icode = code_for_v_vx (UNSPEC_VMIN, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vminu functions.  */
+rtx
+vminu::expand (const function_instance &instance, tree exp,
+                   rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_v_vv (UMIN, mode);
+  else
+    icode = code_for_v_vx (UNSPEC_VMINU, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmax functions.  */
+rtx
+vmax::expand (const function_instance &instance, tree exp,
+                   rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_v_vv (SMAX, mode);
+  else
+    icode = code_for_v_vx (UNSPEC_VMAX, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmaxu functions.  */
+rtx
+vmaxu::expand (const function_instance &instance, tree exp,
+                   rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_v_vv (UMAX, mode);
+  else
+    icode = code_for_v_vx (UNSPEC_VMAXU, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmul functions.  */
+rtx
+vmul::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vmul_vv (mode);
+  else
+    icode = code_for_v_vx (UNSPEC_VMUL, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmulh functions.  */
+rtx
+vmulh::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vmulh_vv (UNSPEC_VMULH, mode);
+  else
+    icode = code_for_v_vx (UNSPEC_VMULH, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmulhu functions.  */
+rtx
+vmulhu::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vmulh_vv (UNSPEC_VMULHU, mode);
+  else
+    icode = code_for_v_vx (UNSPEC_VMULHU, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmulhsu functions.  */
+rtx
+vmulhsu::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vmulhsu_vv (mode);
+  else
+    icode = code_for_v_vx (UNSPEC_VMULHSU, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vdiv functions.  */
+rtx
+vdiv::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_v_vv (DIV, mode);
+  else
+    icode = code_for_v_vx (UNSPEC_VDIV, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vdivu functions.  */
+rtx
+vdivu::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_v_vv (UDIV, mode);
+  else
+    icode = code_for_v_vx (UNSPEC_VDIVU, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vrem functions.  */
+rtx
+vrem::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_v_vv (MOD, mode);
+  else
+    icode = code_for_v_vx (UNSPEC_VREM, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vremu functions.  */
+rtx
+vremu::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_v_vv (UMOD, mode);
+  else
+    icode = code_for_v_vx (UNSPEC_VREMU, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vwmul functions.  */
+rtx
+vwmul::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vwmul_vv (SIGN_EXTEND, mode);
+  else
+    icode = code_for_vwmul_vx (SIGN_EXTEND, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vwmulu functions.  */
+rtx
+vwmulu::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vwmul_vv (ZERO_EXTEND, mode);
+  else
+    icode = code_for_vwmul_vx (ZERO_EXTEND, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vwmulsu functions.  */
+rtx
+vwmulsu::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vwmulsu_vv (mode);
+  else
+    icode = code_for_vwmulsu_vx (mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmacc functions.  */
+rtx
+vmacc::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_v_vv (UNSPEC_MACC, mode);
+  else
+    icode = code_for_v_vx (UNSPEC_MACC, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vnmsac functions.  */
+rtx
+vnmsac::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_v_vv (UNSPEC_NMSAC, mode);
+  else
+    icode = code_for_v_vx (UNSPEC_NMSAC, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmadd functions.  */
+rtx
+vmadd::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_v_vv (UNSPEC_MADD, mode);
+  else
+    icode = code_for_v_vx (UNSPEC_MADD, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vnmsub functions.  */
+rtx
+vnmsub::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_v_vv (UNSPEC_NMSUB, mode);
+  else
+    icode = code_for_v_vx (UNSPEC_NMSUB, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vwmacc functions.  */
+rtx
+vwmacc::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[2];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vwmacc_vv (SIGN_EXTEND, mode);
+  else
+    icode = code_for_vwmacc_vx (SIGN_EXTEND, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vwmaccu functions.  */
+rtx
+vwmaccu::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[2];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vwmacc_vv (ZERO_EXTEND, mode);
+  else
+    icode = code_for_vwmacc_vx (ZERO_EXTEND, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vwmaccsu functions.  */
+rtx
+vwmaccsu::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vv)
+    icode = code_for_vwmaccsu_vv (mode);
+  else
+    icode = code_for_vwmaccsu_vx (mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vwmaccus functions.  */
+rtx
+vwmaccus::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = instance.get_arg_pattern ().arg_list[1];
+  enum insn_code icode = code_for_vwmaccus_vx (mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmerge functions.  */
+size_t
+vmerge::get_position_of_dest_arg (enum predication_index) const
+{
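+  /* The destination (merged) operand of vmerge sits at argument
+     position 1.  */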
+  return 1;
+}
+
+rtx
+vmerge::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_vvm)
+    icode = code_for_vmerge_vvm (mode);
+  else
+    icode = code_for_v_vxm (UNSPEC_VMERGE, mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vmv functions.  */
+void
+vmv::get_argument_types (const function_instance &instance,
+                         vec<tree> &argument_types) const
+{
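+  /* vmv.v.x takes a scalar source (type index 1); vmv.v.v takes a
+     vector source of the destination type (type index 0).  */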
+  if (instance.get_operation () == OP_v_x)
+    argument_types.quick_push (get_dt_t_with_index (instance, 1));
+  else
+    argument_types.quick_push (get_dt_t_with_index (instance, 0));
+}
+
+bool
+vmv::can_be_overloaded_p (const function_instance &instance) const
+{
+  if (instance.get_operation () == OP_v_v)
+    return true;
+
+  if (instance.get_pred () == PRED_tu)
+    return true;
+
+  return false;
+}
+
+rtx
+vmv::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+  enum insn_code icode;
+  if (instance.get_operation () == OP_v_x)
+    icode = code_for_v_v_x (UNSPEC_VMV, mode);
+  else
+    icode = code_for_vmv_v_v (mode);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
 } // end namespace riscv_vector
 
 using namespace riscv_vector;
diff --git a/gcc/config/riscv/riscv-vector-builtins-functions.def b/gcc/config/riscv/riscv-vector-builtins-functions.def
index 6d82b1c933d..bf9d42e6d67 100644
--- a/gcc/config/riscv/riscv-vector-builtins-functions.def
+++ b/gcc/config/riscv/riscv-vector-builtins-functions.def
@@ -139,6 +139,104 @@  DEF_RVV_FUNCTION(vsoxei, vsoxei, (4, VITER(V128UNITSI, unsigned), VATTR(0, VSUB,
 DEF_RVV_FUNCTION(vleff, vleff, (2, VITER(VI, signed), VATTR(0, VSUB, c_ptr)), pat_mask_tail, pred_all, OP_v)
 DEF_RVV_FUNCTION(vleff, vleff, (2, VITER(VI, unsigned), VATTR(0, VSUB, c_uptr)), pat_mask_tail, pred_all, OP_v)
 DEF_RVV_FUNCTION(vleff, vleff, (2, VITER(VF, signed), VATTR(0, VSUB, c_ptr)), pat_mask_tail, pred_all, OP_v)
+/* 11. Vector Integer Arithmetic Instructions. */
+DEF_RVV_FUNCTION(vadd, vadd, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vadd, vadd, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vsub, vsub, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vsub, vsub, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vrsub, vrsub, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed)), pat_mask_tail, pred_all, OP_vx)
+DEF_RVV_FUNCTION(vrsub, vrsub, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vx)
+DEF_RVV_FUNCTION(vneg, vneg, (2, VITER(VI, signed), VATTR(0, VI, signed)), pat_mask_tail, pred_all, OP_v)
+DEF_RVV_FUNCTION(vwadd, vwadd, (3, VATTR(1, VW, signed), VITER(VWI, signed), VATTR(1, VWI, signed)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vwsub, vwsub, (3, VATTR(1, VW, signed), VITER(VWI, signed), VATTR(1, VWI, signed)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vwaddu, vwaddu, (3, VATTR(1, VW, unsigned), VITER(VWI, unsigned), VATTR(1, VWI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vwsubu, vwsubu, (3, VATTR(1, VW, unsigned), VITER(VWI, unsigned), VATTR(1, VWI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vwadd, vwadd, (3, VATTR(2, VW, signed), VATTR(2, VW, signed), VITER(VWI, signed)), pat_mask_tail, pred_all, OP_wv | OP_wx)
+DEF_RVV_FUNCTION(vwsub, vwsub, (3, VATTR(2, VW, signed), VATTR(2, VW, signed), VITER(VWI, signed)), pat_mask_tail, pred_all, OP_wv | OP_wx)
+DEF_RVV_FUNCTION(vwaddu, vwaddu, (3, VATTR(2, VW, unsigned), VATTR(2, VW, unsigned), VITER(VWI, unsigned)), pat_mask_tail, pred_all, OP_wv | OP_wx)
+DEF_RVV_FUNCTION(vwsubu, vwsubu, (3, VATTR(2, VW, unsigned), VATTR(2, VW, unsigned), VITER(VWI, unsigned)), pat_mask_tail, pred_all, OP_wv | OP_wx)
+DEF_RVV_FUNCTION(vwcvt, vwcvt, (2, VATTR(1, VW, signed), VITER(VWI, signed)), pat_mask_tail, pred_all, OP_x_x_v)
+DEF_RVV_FUNCTION(vwcvtu, vwcvtu, (2, VATTR(1, VW, unsigned), VITER(VWI, unsigned)), pat_mask_tail, pred_all, OP_x_x_v)
+DEF_RVV_FUNCTION(vsext, vsext, (2, VATTR(1, VW, signed), VITER(VWI, signed)), pat_mask_tail, pred_all, OP_vf2)
+DEF_RVV_FUNCTION(vsext, vsext, (2, VATTR(1, VQW, signed), VITER(VQWI, signed)), pat_mask_tail, pred_all, OP_vf4)
+DEF_RVV_FUNCTION(vsext, vsext, (2, VATTR(1, VOW, signed), VITER(VOWI, signed)), pat_mask_tail, pred_all, OP_vf8)
+DEF_RVV_FUNCTION(vzext, vzext, (2, VATTR(1, VW, unsigned), VITER(VWI, unsigned)), pat_mask_tail, pred_all, OP_vf2)
+DEF_RVV_FUNCTION(vzext, vzext, (2, VATTR(1, VQW, unsigned), VITER(VQWI, unsigned)), pat_mask_tail, pred_all, OP_vf4)
+DEF_RVV_FUNCTION(vzext, vzext, (2, VATTR(1, VOW, unsigned), VITER(VOWI, unsigned)), pat_mask_tail, pred_all, OP_vf8)
+DEF_RVV_FUNCTION(vadc, vadc, (4, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed), VATTR(0, VM, signed)), PAT_tail, pred_tail, OP_vvm | OP_vxm)
+DEF_RVV_FUNCTION(vsbc, vsbc, (4, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed), VATTR(0, VM, signed)), PAT_tail, pred_tail, OP_vvm | OP_vxm)
+DEF_RVV_FUNCTION(vadc, vadc, (4, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VM, unsigned)), PAT_tail, pred_tail, OP_vvm | OP_vxm)
+DEF_RVV_FUNCTION(vsbc, vsbc, (4, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VM, unsigned)), PAT_tail, pred_tail, OP_vvm | OP_vxm)
+DEF_RVV_FUNCTION(vmadc, vmadc, (4, VATTR(1, VM, signed), VITER(VI, signed), VATTR(1, VI, signed), VATTR(1, VM, signed)), PAT_none, PRED_void, OP_vvm | OP_vxm)
+DEF_RVV_FUNCTION(vmadc, vmadc, (3, VATTR(1, VM, signed), VITER(VI, signed), VATTR(1, VI, signed)), PAT_none, PRED_void, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmsbc, vmsbc, (4, VATTR(1, VM, signed), VITER(VI, signed), VATTR(1, VI, signed), VATTR(1, VM, signed)), PAT_none, PRED_void, OP_vvm | OP_vxm)
+DEF_RVV_FUNCTION(vmsbc, vmsbc, (3, VATTR(1, VM, signed), VITER(VI, signed), VATTR(1, VI, signed)), PAT_none, PRED_void, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmadc, vmadc, (4, VATTR(1, VM, signed), VITER(VI, unsigned), VATTR(1, VI, unsigned), VATTR(1, VM, signed)), PAT_none, PRED_void, OP_vvm | OP_vxm)
+DEF_RVV_FUNCTION(vmadc, vmadc, (3, VATTR(1, VM, signed), VITER(VI, unsigned), VATTR(1, VI, unsigned)), PAT_none, PRED_void, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmsbc, vmsbc, (4, VATTR(1, VM, signed), VITER(VI, unsigned), VATTR(1, VI, unsigned), VATTR(1, VM, signed)), PAT_none, PRED_void, OP_vvm | OP_vxm)
+DEF_RVV_FUNCTION(vmsbc, vmsbc, (3, VATTR(1, VM, signed), VITER(VI, unsigned), VATTR(1, VI, unsigned)), PAT_none, PRED_void, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vand, vand, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vor, vor, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vxor, vxor, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vand, vand, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vor, vor, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vxor, vxor, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vnot, vnot, (2, VITER(VI, signed), VATTR(0, VI, signed)), pat_mask_tail, pred_all, OP_v)
+DEF_RVV_FUNCTION(vnot, vnot, (2, VITER(VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_v)
+DEF_RVV_FUNCTION(vsll, vsll, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vsll, vsll, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vsrl, vsrl, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vsra, vsra, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vnsrl, vnsrl, (3, VITER(VWI, unsigned), VATTR(0, VW, unsigned), VATTR(0, VWI, unsigned)), pat_mask_tail, pred_all, OP_wv | OP_wx)
+DEF_RVV_FUNCTION(vnsra, vnsra, (3, VITER(VWI, signed), VATTR(0, VW, signed), VATTR(0, VWI, unsigned)), pat_mask_tail, pred_all, OP_wv | OP_wx)
+DEF_RVV_FUNCTION(vncvt, vncvt, (2, VITER(VWI, signed), VATTR(0, VW, signed)), pat_mask_tail, pred_all, OP_x_x_w)
+DEF_RVV_FUNCTION(vncvt, vncvt, (2, VITER(VWI, unsigned), VATTR(0, VW, unsigned)), pat_mask_tail, pred_all, OP_x_x_w)
+DEF_RVV_FUNCTION(vmseq, vmseq, (3, VATTR(1, VM, signed), VITER(VI, signed), VATTR(1, VI, signed)), pat_mask_ignore_tp, pred_mask, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmseq, vmseq, (3, VATTR(1, VM, unsigned), VITER(VI, unsigned), VATTR(1, VI, unsigned)), pat_mask_ignore_tp, pred_mask, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmsne, vmsne, (3, VATTR(1, VM, signed), VITER(VI, signed), VATTR(1, VI, signed)), pat_mask_ignore_tp, pred_mask, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmsne, vmsne, (3, VATTR(1, VM, unsigned), VITER(VI, unsigned), VATTR(1, VI, unsigned)), pat_mask_ignore_tp, pred_mask, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmslt, vmslt, (3, VATTR(1, VM, signed), VITER(VI, signed), VATTR(1, VI, signed)), pat_mask_ignore_tp, pred_mask, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmsltu, vmsltu, (3, VATTR(1, VM, unsigned), VITER(VI, unsigned), VATTR(1, VI, unsigned)), pat_mask_ignore_tp, pred_mask, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmsgt, vmsgt, (3, VATTR(1, VM, signed), VITER(VI, signed), VATTR(1, VI, signed)), pat_mask_ignore_tp, pred_mask, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmsgtu, vmsgtu, (3, VATTR(1, VM, unsigned), VITER(VI, unsigned), VATTR(1, VI, unsigned)), pat_mask_ignore_tp, pred_mask, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmsle, vmsle, (3, VATTR(1, VM, signed), VITER(VI, signed), VATTR(1, VI, signed)), pat_mask_ignore_tp, pred_mask, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmsleu, vmsleu, (3, VATTR(1, VM, unsigned), VITER(VI, unsigned), VATTR(1, VI, unsigned)), pat_mask_ignore_tp, pred_mask, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmsge, vmsge, (3, VATTR(1, VM, signed), VITER(VI, signed), VATTR(1, VI, signed)), pat_mask_ignore_tp, pred_mask, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmsgeu, vmsgeu, (3, VATTR(1, VM, unsigned), VITER(VI, unsigned), VATTR(1, VI, unsigned)), pat_mask_ignore_tp, pred_mask, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmin, vmin, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vminu, vminu, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmax, vmax, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmaxu, vmaxu, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmul, vmul, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmul, vmul, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmulh, vmulh, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmulhu, vmulhu, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmulhsu, vmulhsu, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vdiv, vdiv, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vdivu, vdivu, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vrem, vrem, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vremu, vremu, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vwmul, vwmul, (3, VATTR(1, VW, signed), VITER(VWI, signed), VATTR(1, VWI, signed)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vwmulu, vwmulu, (3, VATTR(1, VW, unsigned), VITER(VWI, unsigned), VATTR(1, VWI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vwmulsu, vwmulsu, (3, VATTR(1, VW, signed), VITER(VWI, signed), VATTR(1, VWI, unsigned)), pat_mask_tail, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmacc, vmacc, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed)), pat_mask_tail_dest, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vnmsac, vnmsac, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed)), pat_mask_tail_dest, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmadd, vmadd, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed)), pat_mask_tail_dest, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vnmsub, vnmsub, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed)), pat_mask_tail_dest, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmacc, vmacc, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail_dest, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vnmsac, vnmsac, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail_dest, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vmadd, vmadd, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail_dest, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vnmsub, vnmsub, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), pat_mask_tail_dest, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vwmacc, vwmacc, (3, VATTR(1, VW, signed), VITER(VWI, signed), VATTR(1, VWI, signed)), pat_mask_tail_dest, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vwmaccu, vwmaccu, (3, VATTR(1, VW, unsigned), VITER(VWI, unsigned), VATTR(1, VWI, unsigned)), pat_mask_tail_dest, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vwmaccsu, vwmaccsu, (3, VATTR(1, VW, signed), VITER(VWI, signed), VATTR(1, VWI, unsigned)), pat_mask_tail_dest, pred_all, OP_vv | OP_vx)
+DEF_RVV_FUNCTION(vwmaccus, vwmaccus, (3, VATTR(1, VW, signed), VITER(VWI, unsigned), VATTR(1, VWI, signed)), pat_mask_tail_dest, pred_all, OP_vx)
+DEF_RVV_FUNCTION(vmerge, vmerge, (3, VITER(VI, signed), VATTR(0, VI, signed), VATTR(0, VI, signed)), PAT_tail | PAT_merge, pred_tail, OP_vvm | OP_vxm)
+DEF_RVV_FUNCTION(vmerge, vmerge, (3, VITER(VI, unsigned), VATTR(0, VI, unsigned), VATTR(0, VI, unsigned)), PAT_tail | PAT_merge, pred_tail, OP_vvm | OP_vxm)
+DEF_RVV_FUNCTION(vmerge, vmerge, (3, VITER(VF, signed), VATTR(0, VF, signed), VATTR(0, VF, signed)), PAT_tail | PAT_merge, pred_tail, OP_vvm)
+DEF_RVV_FUNCTION(vmv, vmv, (2, VITER(VI, signed), VATTR(0, VSUB, signed)), PAT_tail, pred_tail, OP_v_v | OP_v_x)
+DEF_RVV_FUNCTION(vmv, vmv, (2, VITER(VI, unsigned), VATTR(0, VSUB, unsigned)), PAT_tail, pred_tail, OP_v_v | OP_v_x)
+DEF_RVV_FUNCTION(vmv, vmv, (2, VITER(VF, signed), VATTR(0, VSUB, signed)), PAT_tail, pred_tail, OP_v_v)
 #undef REQUIRED_EXTENSIONS
 #undef DEF_RVV_FUNCTION
 #undef VITER
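
For context: every DEF_RVV_FUNCTION entry above becomes a user-visible
intrinsic.  A minimal usage sketch of the new integer intrinsics, assuming
the names follow the RVV intrinsics naming convention (the snippet is an
illustration, not part of the patch):

#include <stdint.h>
#include <stddef.h>
#include <riscv_vector.h>

/* c[i] += a[i] * b[i], element-wise; expected to use vsetvli, vle32.v,
   vmacc.vv and vse32.v under the hood.  */
void
fmacc (int32_t *c, const int32_t *a, const int32_t *b, size_t n)
{
  for (size_t vl; n > 0; n -= vl, a += vl, b += vl, c += vl)
    {
      vl = vsetvl_e32m1 (n);
      vint32m1_t va = vle32_v_i32m1 (a, vl);
      vint32m1_t vb = vle32_v_i32m1 (b, vl);
      vint32m1_t vc = vle32_v_i32m1 (c, vl);
      vc = vmacc_vv_i32m1 (vc, va, vb, vl);
      vse32_v_i32m1 (c, vc, vl);
    }
}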
diff --git a/gcc/config/riscv/riscv-vector-builtins-functions.h b/gcc/config/riscv/riscv-vector-builtins-functions.h
index a37e21876a6..bde03e8d49d 100644
--- a/gcc/config/riscv/riscv-vector-builtins-functions.h
+++ b/gcc/config/riscv/riscv-vector-builtins-functions.h
@@ -831,6 +831,707 @@  public:
   virtual rtx expand (const function_instance &, tree, rtx) const override;
 };
 
+/* A function_base for basic_alu functions.  */
+class basic_alu : public function_builder
+{
+public:
+  // use the same constructor as function_builder
+  using function_builder::function_builder;
+  
+  virtual char * assemble_name (function_instance &) override;
+  
+  virtual tree get_return_type (const function_instance &) const override;
+};
+
+/* A function_base for unary functions.  */
+class unop : public basic_alu
+{
+public:
+  // use the same constructor as basic_alu
+  using basic_alu::basic_alu;
+  
+  virtual void get_argument_types (const function_instance &, vec<tree> &) const override;
+  
+  virtual bool can_be_overloaded_p (const function_instance &) const override;
+};
+
+/* A function_base for binary functions.  */
+class binop : public basic_alu
+{
+public:
+  // use the same constructor as basic_alu
+  using basic_alu::basic_alu;
+
+  virtual void get_argument_types (const function_instance &, vec<tree> &) const override;
+  
+  virtual bool can_be_overloaded_p (const function_instance &) const override;
+};
+
+/* A function_base for widen binary functions.  */
+class wbinop : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+  
+  virtual char * assemble_name (function_instance &) override;
+};
+
+/* A function_base for ternary functions.  */
+class ternop : public basic_alu
+{
+public:
+  // use the same constructor as basic_alu
+  using basic_alu::basic_alu;
+
+  virtual void get_argument_types (const function_instance &, vec<tree> &) const override;
+  
+  virtual bool can_be_overloaded_p (const function_instance &) const override;
+};
+
+/* A function_base for vadd functions.  */
+class vadd : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vsub functions.  */
+class vsub : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vrsub functions.  */
+class vrsub : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vneg functions.  */
+class vneg : public unop
+{
+public:
+  // use the same constructor as unop
+  using unop::unop;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vwadd functions.  */
+class vwadd : public wbinop
+{
+public:
+  // use the same constructor as wbinop
+  using wbinop::wbinop;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vwsub functions.  */
+class vwsub : public wbinop
+{
+public:
+  // use the same constructor as wbinop
+  using wbinop::wbinop;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vwaddu functions.  */
+class vwaddu : public wbinop
+{
+public:
+  // use the same constructor as wbinop
+  using wbinop::wbinop;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vwsubu functions.  */
+class vwsubu : public wbinop
+{
+public:
+  // use the same constructor as wbinop
+  using wbinop::wbinop;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vwcvt functions.  */
+class vwcvt : public unop
+{
+public:
+  // use the same constructor as unop
+  using unop::unop;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vwcvtu functions.  */
+class vwcvtu : public unop
+{
+public:
+  // use the same constructor as unop
+  using unop::unop;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vsext functions.  */
+class vsext : public unop
+{
+public:
+  // use the same constructor as unop
+  using unop::unop;
+  
+  virtual char * assemble_name (function_instance &) override;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vzext functions.  */
+class vzext : public unop
+{
+public:
+  // use the same constructor as unop
+  using unop::unop;
+  
+  virtual char * assemble_name (function_instance &) override;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vadc functions.  */
+class vadc : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vsbc functions.  */
+class vsbc : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmadc functions.  */
+class vmadc : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+  
+  virtual char * assemble_name (function_instance &) override;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmsbc functions.  */
+class vmsbc : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+  
+  virtual char * assemble_name (function_instance &) override;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vand functions.  */
+class vand : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vor functions.  */
+class vor : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vxor functions.  */
+class vxor : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vnot functions.  */
+class vnot : public unop
+{
+public:
+  // use the same constructor as unop
+  using unop::unop;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vshift functions.  */
+class vshift : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+  
+  virtual void get_argument_types (const function_instance &, vec<tree> &) const override;
+};
+
+/* A function_base for vsll functions.  */
+class vsll : public vshift
+{
+public:
+  // use the same constructor as vshift
+  using vshift::vshift;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vsrl functions.  */
+class vsrl : public vshift
+{
+public:
+  // use the same constructor as vshift
+  using vshift::vshift;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vsra functions.  */
+class vsra : public vshift
+{
+public:
+  // use the same constructor as vshift
+  using vshift::vshift;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vnsrl functions.  */
+class vnsrl : public vshift
+{
+public:
+  // use the same constructor as vshift
+  using vshift::vshift;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vnsra functions.  */
+class vnsra : public vshift
+{
+public:
+  // use the same constructor as vshift
+  using vshift::vshift;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vncvt functions.  */
+class vncvt : public unop
+{
+public:
+  // use the same constructor as unop
+  using unop::unop;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vcmp functions.  */
+class vcmp : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+  
+  virtual char * assemble_name (function_instance &) override;
+};
+
+/* A function_base for vmseq functions.  */
+class vmseq : public vcmp
+{
+public:
+  // use the same constructor as vcmp
+  using vcmp::vcmp;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmsne functions.  */
+class vmsne : public vcmp
+{
+public:
+  // use the same constructor as vcmp
+  using vcmp::vcmp;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmslt functions.  */
+class vmslt : public vcmp
+{
+public:
+  // use the same constructor as vcmp
+  using vcmp::vcmp;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmsltu functions.  */
+class vmsltu : public vcmp
+{
+public:
+  // use the same constructor as vcmp
+  using vcmp::vcmp;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmsgt functions.  */
+class vmsgt : public vcmp
+{
+public:
+  // use the same constructor as vcmp
+  using vcmp::vcmp;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmsgtu functions.  */
+class vmsgtu : public vcmp
+{
+public:
+  // use the same constructor as vcmp
+  using vcmp::vcmp;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmsle functions.  */
+class vmsle : public vcmp
+{
+public:
+  // use the same constructor as vcmp
+  using vcmp::vcmp;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmsleu functions.  */
+class vmsleu : public vcmp
+{
+public:
+  // use the same constructor as vcmp
+  using vcmp::vcmp;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmsge functions.  */
+class vmsge : public vcmp
+{
+public:
+  // use the same constructor as vcmp
+  using vcmp::vcmp;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmsgeu functions.  */
+class vmsgeu : public vcmp
+{
+public:
+  // use the same constructor as vcmp
+  using vcmp::vcmp;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmin functions.  */
+class vmin : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vminu functions.  */
+class vminu : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmax functions.  */
+class vmax : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmaxu functions.  */
+class vmaxu : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmul functions.  */
+class vmul : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmulh functions.  */
+class vmulh : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmulhu functions.  */
+class vmulhu : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmulhsu functions.  */
+class vmulhsu : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vdiv functions.  */
+class vdiv : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vdivu functions.  */
+class vdivu : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vrem functions.  */
+class vrem : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vremu functions.  */
+class vremu : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vwmul functions.  */
+class vwmul : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vwmulu functions.  */
+class vwmulu : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vwmulsu functions.  */
+class vwmulsu : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmacc functions.  */
+class vmacc : public ternop
+{
+public:
+  // use the same constructor as ternop
+  using ternop::ternop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vnmsac functions.  */
+class vnmsac : public ternop
+{
+public:
+  // use the same constructor as ternop
+  using ternop::ternop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmadd functions.  */
+class vmadd : public ternop
+{
+public:
+  // use the same constructor as ternop
+  using ternop::ternop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vnmsub functions.  */
+class vnmsub : public ternop
+{
+public:
+  // use the same constructor as ternop
+  using ternop::ternop;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vwmacc functions.  */
+class vwmacc : public ternop
+{
+public:
+  // use the same constructor as ternop
+  using ternop::ternop;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vwmaccu functions.  */
+class vwmaccu : public ternop
+{
+public:
+  // use the same constructor as ternop
+  using ternop::ternop;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vwmaccsu functions.  */
+class vwmaccsu : public ternop
+{
+public:
+  // use the same constructor as ternop
+  using ternop::ternop;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vwmaccus functions.  */
+class vwmaccus : public ternop
+{
+public:
+  // use the same constructor as ternop
+  using ternop::ternop;
+  
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmerge functions.  */
+class vmerge : public binop
+{
+public:
+  // use the same constructor as binop
+  using binop::binop;
+  
+  virtual size_t get_position_of_dest_arg (enum predication_index) const override;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vmv functions.  */
+class vmv : public unop
+{
+public:
+  // use the same constructor as unop
+  using unop::unop;
+  
+  virtual void get_argument_types (const function_instance &, vec<tree> &) const override;
+  
+  virtual bool can_be_overloaded_p (const function_instance &) const override;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
 
 } // namespace riscv_vector
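
The declarations above form a small hierarchy: basic_alu supplies the
name-assembly and return-type logic, unop/binop/ternop supply the argument
lists by arity, and each concrete intrinsic class normally only overrides
expand.  As a hypothetical sketch (not part of the patch), a new unary
integer intrinsic "vfoo" would need nothing more than:

/* A function_base for vfoo functions (hypothetical).  */
class vfoo : public unop
{
public:
  // use the same constructor as unop
  using unop::unop;

  virtual rtx expand (const function_instance &, tree, rtx) const override;
};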
 
diff --git a/gcc/config/riscv/riscv-vector.cc b/gcc/config/riscv/riscv-vector.cc
index 175d6da4695..1d53c50a751 100644
--- a/gcc/config/riscv/riscv-vector.cc
+++ b/gcc/config/riscv/riscv-vector.cc
@@ -937,7 +937,7 @@  modify_operands (machine_mode Vmode, machine_mode VSImode,
     {
       if (imm32_p (operands[i]))
         {
-          if (!imm5_p (operands[i]))
+          if (!imm5_p (operands[i]) || unspec == UNSPEC_VSSUBU)
             operands[i] = force_reg (SImode, operands[i]);
           return GEN_VX_32BIT;
         }
@@ -963,7 +963,7 @@  modify_operands (machine_mode Vmode, machine_mode VSImode,
     }
   else
     {
-      if (!imm5_p (operands[i]))
+      if (!imm5_p (operands[i]) || unspec == UNSPEC_VSSUBU)
         operands[i] = force_reg (VSUBmode, operands[i]);
       return GEN_VX;
     }
@@ -1018,4 +1018,41 @@  emit_op5 (unsigned int unspec, machine_mode Vmode, machine_mode VSImode,
 
   emit_insn (
       (*gen) (operands[0], operands[1], operands[2], operands[3], operands[4]));
+}
+
+/* Helper function for handling SEW=64 on an RV32 system.  */
+void
+emit_op6 (unsigned int unspec, machine_mode Vmode,
+          machine_mode VSImode, machine_mode VMSImode, machine_mode VSUBmode,
+          rtx *operands, gen_6 *gen_vx, gen_6 *gen_vx_32bit, gen_6 *gen_vv,
+          imm_p *imm5_p, int i, bool reverse)
+{
+  enum GEN_CLASS gen_class = modify_operands (
+      Vmode, VSImode, VMSImode, VSUBmode, operands, imm5_p, i, reverse, unspec);
+
+  gen_6 *gen = gen_class == GEN_VX   ? gen_vx
+               : gen_class == GEN_VV ? gen_vv
+                                     : gen_vx_32bit;
+
+  emit_insn ((*gen) (operands[0], operands[1], operands[2], operands[3],
+                     operands[4], operands[5]));
+}
+
+
+/* Helper function for handling SEW=64 on an RV32 system.  */
+void
+emit_op7 (unsigned int unspec, machine_mode Vmode, machine_mode VSImode,
+          machine_mode VMSImode, machine_mode VSUBmode, rtx *operands,
+          gen_7 *gen_vx, gen_7 *gen_vx_32bit, gen_7 *gen_vv, imm_p *imm5_p,
+          int i, bool reverse)
+{
+  enum GEN_CLASS gen_class = modify_operands (
+      Vmode, VSImode, VMSImode, VSUBmode, operands, imm5_p, i, reverse, unspec);
+
+  gen_7 *gen = gen_class == GEN_VX   ? gen_vx
+               : gen_class == GEN_VV ? gen_vv
+                                     : gen_vx_32bit;
+
+  emit_insn ((*gen) (operands[0], operands[1], operands[2], operands[3],
+                     operands[4], force_reg_for_over_uimm (operands[5]), operands[6]));
 }
\ No newline at end of file
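
emit_op6 and emit_op7 exist because, on RV32, a 64-bit scalar operand
occupies a register pair and cannot feed a .vx instruction directly, so
the expander has to pick between the _vv, _vx and _vx_32bit generators
that modify_operands selects.  A user-level sketch of the case being
handled (intrinsic name assumes the RVV intrinsics naming convention;
not part of the patch):

#include <stdint.h>
#include <stddef.h>
#include <riscv_vector.h>

/* On RV64 this is a single vadd.vx.  On RV32 the 64-bit scalar is
   either sign-extended from a 32-bit register by the _32bit pattern or
   broadcast to a vector so that the .vv form can be used instead.  */
vint64m1_t
add64 (vint64m1_t v, int64_t x, size_t vl)
{
  return vadd_vx_i64m1 (v, x, vl);
}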
diff --git a/gcc/config/riscv/riscv.cc b/gcc/config/riscv/riscv.cc
index 29106bbf6fe..664798b9108 100644
--- a/gcc/config/riscv/riscv.cc
+++ b/gcc/config/riscv/riscv.cc
@@ -3964,6 +3964,28 @@  riscv_print_operand (FILE *file, rtx op, int letter)
 	output_addr_const (file, newop);
 	break;
       }
+    case 'v':
+      {
+	rtx elt;
+	if (!const_vec_duplicate_p (op, &elt))
+	  output_operand_lossage ("invalid vector constant");
+	else if (GET_MODE_CLASS (GET_MODE (op)) == MODE_VECTOR_INT)
+	  asm_fprintf (file, "%wd", INTVAL (elt));
+	else
+	  output_operand_lossage ("invalid vector constant");
+	break;
+      }
+    case 'V':
+      {
+	rtx elt;
+	if (!const_vec_duplicate_p (op, &elt))
+	  output_operand_lossage ("invalid vector constant");
+	else if (GET_MODE_CLASS (GET_MODE (op)) == MODE_VECTOR_INT)
+	  asm_fprintf (file, "%wd", -INTVAL (elt));
+	else
+	  output_operand_lossage ("invalid vector constant");
+	break;
+      }
     default:
       switch (code)
 	{
@@ -3980,6 +4002,19 @@  riscv_print_operand (FILE *file, rtx op, int letter)
 	    output_address (mode, XEXP (op, 0));
 	  break;
 
+	case CONST_VECTOR:
+	  {
+	    rtx imm;
+	    if (!const_vec_duplicate_p (op, &imm))
+	      {
+		output_operand_lossage ("invalid immediate value for vector");
+		break;
+	      }
+	    gcc_assert (CONST_INT_P (imm));
+	    asm_fprintf (file, "%wd", INTVAL (imm));
+	    break;
+	  }
+
 	default:
 	  if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
 	    fputs (reg_names[GP_REG_FIRST], file);
@@ -5196,6 +5231,24 @@  riscv_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
 static bool
 riscv_modes_tieable_p (machine_mode mode1, machine_mode mode2)
 {
+  if (rvv_mode_p (mode1) && rvv_mode_p (mode2))
+    {
+      /* Two RVV vector modes can always be tied.  */
+      return true;
+    }
+  else if (rvv_mode_p (mode1) || rvv_mode_p (mode2))
+    {
+      /* If only one of them is a vector mode, don't allow a scalar mode and a vector mode to be tied.  */
+      return false;
+    }
+  else if (TARGET_VECTOR && (
+            (GET_MODE_CLASS (mode1) == MODE_FLOAT && GET_MODE_CLASS (mode2) != MODE_FLOAT)
+            || (GET_MODE_CLASS (mode1) != MODE_FLOAT && GET_MODE_CLASS (mode2) == MODE_FLOAT)))
+    {
+      /* Enabling the V extension implies that the F or D extension is also
+         enabled.  In this situation, don't allow float and scalar modes to be tied.  */
+      return false;
+    }
   return (mode1 == mode2
 	  || !(GET_MODE_CLASS (mode1) == MODE_FLOAT
 	       && GET_MODE_CLASS (mode2) == MODE_FLOAT));
diff --git a/gcc/config/riscv/riscv.md b/gcc/config/riscv/riscv.md
index ae4f5b50214..f1d4fce24ca 100644
--- a/gcc/config/riscv/riscv.md
+++ b/gcc/config/riscv/riscv.md
@@ -471,7 +471,9 @@ 
 		     (gt "") (gtu "u")
 		     (ge "") (geu "u")
 		     (lt "") (ltu "u")
-		     (le "") (leu "u")])
+		     (le "") (leu "u")
+		     (fix "") (unsigned_fix "u")
+		     (float "") (unsigned_float "u")])
 
 ;; <su> is like <u>, but the signed form expands to "s" rather than "".
 (define_code_attr su [(sign_extend "s") (zero_extend "u")])
@@ -480,19 +482,52 @@ 
 (define_code_attr optab [(ashift "ashl")
 			 (ashiftrt "ashr")
 			 (lshiftrt "lshr")
+			 (mult "mul")
 			 (div "div")
 			 (mod "mod")
 			 (udiv "udiv")
 			 (umod "umod")
+			 (eq "eq")
+			 (ne "ne")
 			 (ge "ge")
 			 (le "le")
 			 (gt "gt")
 			 (lt "lt")
+			 (geu "geu")
+			 (leu "leu")
+			 (gtu "gtu")
+			 (ltu "ltu")
 			 (ior "ior")
 			 (xor "xor")
 			 (and "and")
 			 (plus "add")
-			 (minus "sub")])
+			 (minus "sub")
+			 (smax "smax")
+			 (umax "umax")
+			 (smin "smin")
+			 (umin "umin")
+			 (us_plus "usadd")
+			 (ss_plus "ssadd")
+			 (us_minus "ussub")
+			 (ss_minus "sssub")
+			 (neg "neg")
+			 (not "one_cmpl")
+			 (abs "abs")
+			 (fix "fix_trunc")
+			 (unsigned_fix "fixuns_trunc")
+			 (float "float")
+			 (unsigned_float "floatuns")
+			 (sqrt "sqrt")
+			 (unordered "unordered")
+			 (ordered "ordered")
+			 (unlt "unlt") 
+			 (unle "unle") 
+			 (unge "unge") 
+			 (ungt "ungt") 
+			 (uneq "uneq")
+			 (ltgt "ltgt")
+			 (sign_extend "extend") 
+			 (zero_extend "zero_extend")])
 
 ;; <insn> expands to the name of the insn that implements a particular code.
 (define_code_attr insn [(ashift "sll")
@@ -506,7 +541,28 @@ 
 			(xor "xor")
 			(and "and")
 			(plus "add")
-			(minus "sub")])
+			(minus "sub")
+			(smax "max")
+			(umax "maxu")
+			(smin "min")
+			(umin "minu")
+			(us_plus "saddu")
+			(ss_plus "sadd")
+			(us_minus "ssubu")
+			(ss_minus "ssub")
+			(eq "eq")
+			(ne "ne")
+			(ge "ge")
+			(le "le")
+			(gt "gt")
+			(lt "lt")
+			(geu "geu")
+			(leu "leu")
+			(gtu "gtu")
+			(ltu "ltu")
+			(neg "neg")
+			(not "not")
+			(mult "mul")])
 
 ;; Ghost instructions produce no real code and introduce no hazards.
 ;; They exist purely to express an effect on dataflow.
diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md
index 501980d822f..748025d4080 100644
--- a/gcc/config/riscv/vector-iterators.md
+++ b/gcc/config/riscv/vector-iterators.md
@@ -792,9 +792,6 @@ 
   UNSPEC_VMIN UNSPEC_VMINU UNSPEC_VMAX UNSPEC_VMAXU
   UNSPEC_VMUL UNSPEC_VMULH UNSPEC_VMULHU UNSPEC_VMULHSU
   UNSPEC_VDIV UNSPEC_VDIVU UNSPEC_VREM UNSPEC_VREMU
-  UNSPEC_VSADD UNSPEC_VSADDU UNSPEC_VSSUB UNSPEC_VSSUBU
-  UNSPEC_VAADD UNSPEC_VAADDU UNSPEC_VASUB UNSPEC_VASUBU
-  UNSPEC_VSMUL
 ])
 
 (define_int_iterator VXMOP [
diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md
index fc7ec77dfc4..cb8bdc5781f 100644
--- a/gcc/config/riscv/vector.md
+++ b/gcc/config/riscv/vector.md
@@ -974,6 +974,228 @@ 
   [(set_attr "type" "vleff")
    (set_attr "mode" "<MODE>")])
 
+;; -------------------------------------------------------------------------------
+;; ---- Expanders for insns that need special handling of SEW=64 on TARGET_32BIT
+;; -------------------------------------------------------------------------------
+(define_expand "@v<vxoptab><mode>_vx"
+  [(unspec [
+    (match_operand:VI 0 "register_operand")
+    (match_operand:<VM> 1 "vector_reg_or_const0_operand")
+    (match_operand:VI 2 "vector_reg_or_const0_operand")
+    (match_operand:VI 3 "register_operand")
+    (match_operand:<VSUB> 4 "reg_or_const_int_operand")
+    (match_operand 5 "p_reg_or_const_csr_operand")
+    (match_operand 6 "const_int_operand")
+   ] VXOP)]
+  "TARGET_VECTOR"
+  {
+    emit_op7 (
+      <VXOPTAB>,
+      <MODE>mode, <VDI_TO_VSI>mode, <VDI_TO_VSI_VM>mode,
+      <VSUB>mode,
+      operands,
+      gen_v<vxoptab><mode>_vx_internal,
+      gen_v<vxoptab><vi_to_v64biti>_vx_32bit,
+      gen_v<vxoptab><mode>_vv,
+      satisfies_constraint_<immptab>,
+      4, false
+    );
+    DONE;
+  }
+)
+
+;; vrsub
+(define_expand "@v<vxoptab><mode>_vx"
+  [(unspec [
+    (match_operand:VI 0 "register_operand")
+    (match_operand:<VM> 1 "vector_reg_or_const0_operand")
+    (match_operand:VI 2 "vector_reg_or_const0_operand")
+    (match_operand:VI 3 "register_operand")
+    (match_operand:<VSUB> 4 "reg_or_const_int_operand")
+    (match_operand 5 "p_reg_or_const_csr_operand")
+    (match_operand 6 "const_int_operand")
+   ] VXROP)]
+  "TARGET_VECTOR"
+  {
+    emit_op7 (
+      <VXOPTAB>,
+      <MODE>mode, <VDI_TO_VSI>mode, <VDI_TO_VSI_VM>mode,
+      <VSUB>mode,
+      operands,
+      gen_v<vxoptab><mode>_vx_internal,
+      gen_v<vxoptab><vi_to_v64biti>_vx_32bit,
+      gen_vsub<mode>_vv,
+      satisfies_constraint_<immptab>,
+      4, true
+    );
+    DONE;
+  }
+)
+
+;; mvx no mask
+(define_expand "@v<vxoptab><mode>_vx"
+  [(unspec [
+    (match_operand:<VM> 0 "register_operand")
+    (match_operand:VI 1 "register_operand")
+    (match_operand:<VSUB> 2 "reg_or_const_int_operand")
+    (match_operand 3 "p_reg_or_const_csr_operand")
+    (match_operand 4 "const_int_operand")
+   ] MVXOP)]
+  "TARGET_VECTOR"
+  {
+    emit_op5 (
+      0,
+      <MODE>mode, <VDI_TO_VSI>mode, <VDI_TO_VSI_VM>mode,
+      <VSUB>mode,
+      operands,
+      gen_v<vxoptab><mode>_vx_internal,
+      gen_v<vxoptab><vi_to_v64biti>_vx_32bit,
+      gen_v<vxoptab><mode>_vv,
+      satisfies_constraint_<immptab>,
+      2, false
+    );
+    DONE;
+  }
+)
+
+;; vxm no tail policy
+(define_expand "@v<vxoptab><mode>_vxm"
+  [(unspec [
+    (match_operand:<VM> 0 "register_operand")
+    (match_operand:VI 1 "register_operand")
+    (match_operand:<VSUB> 2 "reg_or_const_int_operand")
+    (match_operand:<VM> 3 "register_operand")
+    (match_operand 4 "p_reg_or_const_csr_operand")
+    (match_operand 5 "const_int_operand")
+   ] VXMOP_NO_POLICY)]
+  "TARGET_VECTOR"
+  {
+    emit_op6 (
+      <VXOPTAB>,
+      <MODE>mode, <VDI_TO_VSI>mode, <VDI_TO_VSI_VM>mode,
+      <VSUB>mode,
+      operands,
+      gen_v<vxoptab><mode>_vxm_internal,
+      gen_v<vxoptab><vi_to_v64biti>_vxm_32bit,
+      gen_v<vxoptab><mode>_vvm,
+      satisfies_constraint_<immptab>,
+      2, false
+    );
+    DONE;
+  }
+)
+
+;; compare
+(define_expand "@vms<optab><mode>_vx"
+  [(match_operand:<VM> 0 "register_operand")
+   (match_operand:<VM> 1 "vector_reg_or_const0_operand")
+   (match_operand:<VM> 2 "vector_reg_or_const0_operand")
+   (cmp_all:<VM> (match_operand:VI 3 "register_operand")
+    (vec_duplicate:VI (match_operand:<VSUB> 4 "reg_or_const_int_operand")))
+   (match_operand 5 "p_reg_or_const_csr_operand")
+   (match_operand 6 "const_int_operand")
+  ]
+  "TARGET_VECTOR"
+  {
+    emit_op7 (
+      0,
+      <MODE>mode, <VDI_TO_VSI>mode, <VDI_TO_VSI_VM>mode,
+      <VSUB>mode,
+      operands,
+      gen_vms<optab><mode>_vx_internal,
+      gen_vms<optab><vi_to_v64biti>_vx_32bit,
+      gen_vms<optab><mode>_vv,
+      satisfies_constraint_<cmp_imm_p_tab>,
+      4, false
+    );
+    DONE;
+  }
+)
+
+;; vxm
+(define_expand "@v<vxoptab><mode>_vxm"
+  [(unspec [
+    (match_operand:VI 0 "register_operand")
+    (match_operand:VI 1 "vector_reg_or_const0_operand")
+    (match_operand:VI 2 "register_operand")
+    (match_operand:<VSUB> 3 "reg_or_const_int_operand")
+    (match_operand:<VM> 4 "register_operand")
+    (match_operand 5 "p_reg_or_const_csr_operand")
+    (match_operand 6 "const_int_operand")
+   ] VXMOP)]
+  "TARGET_VECTOR"
+  {
+    emit_op7 (
+      <VXOPTAB>,
+      <MODE>mode, <VDI_TO_VSI>mode, <VDI_TO_VSI_VM>mode,
+      <VSUB>mode,
+      operands,
+      gen_v<vxoptab><mode>_vxm_internal,
+      gen_v<vxoptab><vi_to_v64biti>_vxm_32bit,
+      gen_v<vxoptab><mode>_vvm,
+      satisfies_constraint_<immptab>,
+      3, false
+    );
+    DONE;
+  }
+)
+
+;; mac
+(define_expand "@v<vxoptab><mode>_vx"
+  [(unspec [
+    (match_operand:VI 0 "register_operand")
+    (match_operand:<VM> 1 "vector_reg_or_const0_operand")
+    (match_operand:VI 2 "register_operand")
+    (match_operand:<VSUB> 3 "reg_or_const_int_operand")
+    (match_operand:VI 4 "register_operand")
+    (match_operand 5 "p_reg_or_const_csr_operand")
+    (match_operand 6 "const_int_operand")
+   ] MACOP)]
+  "TARGET_VECTOR"
+  {
+    emit_op7 (
+      <VXOPTAB>,
+      <MODE>mode, <VDI_TO_VSI>mode, <VDI_TO_VSI_VM>mode,
+      <VSUB>mode,
+      operands,
+      gen_v<vxoptab><mode>_vx_internal,
+      gen_v<vxoptab><vi_to_v64biti>_vx_32bit,
+      gen_v<vxoptab><mode>_vv,
+      satisfies_constraint_<immptab>,
+      3, false
+    );
+    DONE;
+  }
+)
+
+;; vmerge
+(define_expand "@v<vxoptab><mode>_vxm"
+  [(unspec [
+    (match_operand:VI 0 "register_operand")
+    (match_operand:<VM> 1 "register_operand")
+    (match_operand:VI 2 "vector_reg_or_const0_operand")
+    (match_operand:VI 3 "register_operand")
+    (match_operand:<VSUB> 4 "reg_or_const_int_operand")
+    (match_operand 5 "p_reg_or_const_csr_operand")
+    (match_operand 6 "const_int_operand")
+   ] VMERGEOP)]
+  "TARGET_VECTOR"
+  {
+    emit_op7 (
+      <VXOPTAB>,
+      <MODE>mode, <VDI_TO_VSI>mode, <VDI_TO_VSI_VM>mode,
+      <VSUB>mode,
+      operands,
+      gen_v<vxoptab><mode>_vxm_internal,
+      gen_v<vxoptab><vi_to_v64biti>_vxm_32bit,
+      gen_v<vxoptab><mode>_vvm,
+      satisfies_constraint_<immptab>,
+      4, false
+    );
+    DONE;
+  }
+)
+
 ;; vmv.v.x
 (define_expand "@v<vxoptab><mode>_v_x"
   [(unspec [
@@ -984,22 +1206,2322 @@ 
     (match_operand 4 "const_int_operand")
    ] VMVOP)]
   "TARGET_VECTOR"
-  {
-    emit_op5 (
-      <VXOPTAB>,
-      <MODE>mode, <VDI_TO_VSI>mode, <VDI_TO_VSI_VM>mode,
-      <VSUB>mode,
-      operands,
-      gen_v<vxoptab><mode>_v_x_internal,
-      gen_v<vxoptab><vi_to_v64biti>_v_x_32bit,
-      NULL,
-      satisfies_constraint_<immptab>,
-      2, false
-    );
-    DONE;
-  }
-)
+  {
+    emit_op5 (
+      <VXOPTAB>,
+      <MODE>mode, <VDI_TO_VSI>mode, <VDI_TO_VSI_VM>mode,
+      <VSUB>mode,
+      operands,
+      gen_v<vxoptab><mode>_v_x_internal,
+      gen_v<vxoptab><vi_to_v64biti>_v_x_32bit,
+      NULL,
+      satisfies_constraint_<immptab>,
+      2, false
+    );
+    DONE;
+  }
+)
+
+;; -------------------------------------------------------------------------------
+;; ---- 11. Vector Integer Arithmetic Instructions
+;; -------------------------------------------------------------------------------
+;; Includes:
+;; - 11.1 Vector Single-Width Integer Add and Subtract
+;; - 11.2 Vector Widening Integer Add/Subtract
+;; - 11.3 Vector Integer Extension
+;; - 11.4 Vector Integer Add-with-Carry/Subtract-with-Borrow Instructions
+;; - 11.5 Vector Bitwise Logical Instructions
+;; - 11.6 Vector Single-Width Bit Shift Instructions
+;; - 11.7 Vector Narrowing Integer Right Shift Instructions
+;; - 11.8 Vector Integer Comparison Instructions
+;; - 11.9 Vector Integer Min/Max Instructions
+;; - 11.10 Vector Single-Width Integer Multiply Instructions
+;; - 11.11 Vector Integer Divide Instructions
+;; - 11.12 Vector Widening Integer Multiply Instructions
+;; - 11.13 Vector Single-Width Integer Multiply-Add Instructions
+;; - 11.14 Vector Widening Integer Multiply-Add Instructions
+;; - 11.15 Vector Integer Merge Instructions
+;; - 11.16 Vector Integer Move Instructions
+;; -------------------------------------------------------------------------------
+
+;; Vector-Vector Integer Add: vadd.vv.
+;; Optimize the case where every element of the const vector is a
+;; 5-bit signed immediate by using vadd.vi.
+(define_insn "@vadd<mode>_vv"
+  [(set (match_operand:VI 0 "register_operand"              "=vd,vd,vd,vd,  vr,vr,vr,vr")
+  (unspec:VI
+    [(unspec:VI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,   J,J,J,J")
+       (plus:VI
+         (match_operand:VI 3 "register_operand"             "vr,vr,vr,vr,   vr,vr,vr,vr")
+         (match_operand:VI 4 "vector_arith_operand"         "vr,vr,vi,vi,   vr,vr,vi,vi"))
+       (match_operand:VI 2 "vector_reg_or_const0_operand"   "0,J,0,J,       0,J,0,J")
+      ] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK,   rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vadd.vv\t%0,%3,%4,%1.t
+   vadd.vv\t%0,%3,%4,%1.t
+   vadd.vi\t%0,%3,%v4,%1.t
+   vadd.vi\t%0,%3,%v4,%1.t
+   vadd.vv\t%0,%3,%4
+   vadd.vv\t%0,%3,%4
+   vadd.vi\t%0,%3,%v4
+   vadd.vi\t%0,%3,%v4"
+  [(set_attr "type" "varith")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Integer Subtract: vsub.vv.
+;; Since RVV doesn't have vsub.vi, optimize the case where every
+;; element of the const vector is a negated 5-bit signed immediate
+;; by using vadd.vi.
+(define_insn "@vsub<mode>_vv"
+  [(set (match_operand:VI 0 "register_operand"              "=vd,vd,vd,vd,vr,vr,vr,vr")
+  (unspec:VI
+    [(unspec:VI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+       (minus:VI
+         (match_operand:VI 3 "register_operand"             "vr,vr,vr,vr,vr,vr,vr,vr")
+         (match_operand:VI 4 "vector_neg_arith_operand"     "vr,vj,vr,vj,vr,vj,vr,vj"))
+       (match_operand:VI 2 "vector_reg_or_const0_operand"   "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vsub.vv\t%0,%3,%4,%1.t
+   vadd.vi\t%0,%3,%V4,%1.t
+   vsub.vv\t%0,%3,%4,%1.t
+   vadd.vi\t%0,%3,%V4,%1.t
+   vsub.vv\t%0,%3,%4
+   vadd.vi\t%0,%3,%V4
+   vsub.vv\t%0,%3,%4
+   vadd.vi\t%0,%3,%V4"
+  [(set_attr "type" "varith")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Integer Add: vadd.vx.
+;; Optimize the case where the scalar operand is a 5-bit signed
+;; immediate by using vadd.vi.
+(define_insn "@vadd<mode>_vx_internal"
+  [(set (match_operand:VI 0 "register_operand"              "=vd,vd,vd,vd,  vr,vr,vr,vr")
+  (unspec:VI
+    [(unspec:VI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,   J,J,J,J")
+       (plus:VI
+         (match_operand:VI 3 "register_operand"             "vr,vr,vr,vr,   vr,vr,vr,vr")
+         (vec_duplicate:VI
+           (match_operand:<VSUB> 4 "reg_or_simm5_operand"   "r,r,Ws5,Ws5,   r,r,Ws5,Ws5")))
+       (match_operand:VI 2 "vector_reg_or_const0_operand"   "0,J,0,J,       0,J,0,J")
+      ] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK,   rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vadd.vx\t%0,%3,%4,%1.t
+   vadd.vx\t%0,%3,%4,%1.t
+   vadd.vi\t%0,%3,%4,%1.t
+   vadd.vi\t%0,%3,%4,%1.t
+   vadd.vx\t%0,%3,%4
+   vadd.vx\t%0,%3,%4
+   vadd.vi\t%0,%3,%4
+   vadd.vi\t%0,%3,%4"
+  [(set_attr "type" "varith")
+   (set_attr "mode" "<MODE>")])
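+;; Illustration (not part of this patch): with the Ws5 alternatives
+;; above, a call such as vadd_vx_i32m1 (v, 7, vl) (RVV intrinsics
+;; naming convention) should emit
+;;   vadd.vi	v0,v0,7
+;; instead of first moving 7 into a scalar register for vadd.vx.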
+
+(define_insn "@vadd<mode>_vx_32bit"
+  [(set (match_operand:V64BITI 0 "register_operand"             "=vd,vd,vd,vd,vr,vr,vr,vr")
+  (unspec:V64BITI
+    [(unspec:V64BITI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand"     "vm,vm,vm,vm,J,J,J,J")
+       (plus:V64BITI
+         (match_operand:V64BITI 3 "register_operand"            "vr,vr,vr,vr,vr,vr,vr,vr")
+         (vec_duplicate:V64BITI
+           (sign_extend:<VSUB>
+             (match_operand:SI 4 "reg_or_simm5_operand"         "r,Ws5,r,Ws5,r,Ws5,r,Ws5"))))
+       (match_operand:V64BITI 2 "vector_reg_or_const0_operand"  "0,0,J,J,0,0,J,J")
+      ] UNSPEC_SELECT)
+     (match_operand:SI 5 "csr_operand"                          "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand:SI 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vadd.vx\t%0,%3,%4,%1.t
+   vadd.vi\t%0,%3,%4,%1.t
+   vadd.vx\t%0,%3,%4,%1.t
+   vadd.vi\t%0,%3,%4,%1.t
+   vadd.vx\t%0,%3,%4
+   vadd.vi\t%0,%3,%4
+   vadd.vx\t%0,%3,%4
+   vadd.vi\t%0,%3,%4"
+  [(set_attr "type" "varith")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Integer Subtract: vsub.vx.
+;; Since RVV doesn't have vsub.vi, optimize the case where the
+;; scalar operand is a negated 5-bit signed immediate by using
+;; vadd.vi.
+(define_insn "@vsub<mode>_vx_internal"
+  [(set (match_operand:VI 0 "register_operand"                "=vd,vd,vd,vd,vr,vr,vr,vr")
+  (unspec:VI
+    [(unspec:VI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand"   "vm,vm,vm,vm,J,J,J,J")
+       (minus:VI
+        (match_operand:VI 3 "register_operand"                "vr,vr,vr,vr,vr,vr,vr,vr")
+        (vec_duplicate:VI
+          (match_operand:<VSUB> 4 "reg_or_neg_simm5_operand"  "r,Wn5,r,Wn5,r,Wn5,r,Wn5")))
+       (match_operand:VI 2 "vector_reg_or_const0_operand"     "0,0,J,J,0,0,J,J")
+      ] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"            "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  {
+    const char *tail = satisfies_constraint_J (operands[1]) ? "" : ",%1.t";
+    char buf[64] = {0};
+    if (satisfies_constraint_Wn5 (operands[4]))
+      {
+        const char *insn = "vadd.vi\t%0,%3";
+        snprintf (buf, sizeof (buf), "%s,%d%s", insn, (int)(-INTVAL (operands[4])), tail);
+      }
+    else
+      {
+        const char *insn = "vsub.vx\t%0,%3,%4";
+        snprintf (buf, sizeof (buf), "%s%s", insn, tail);
+      }
+    output_asm_insn (buf, operands);
+    return "";
+  }
+  [(set_attr "type" "varith")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@vsub<mode>_vx_32bit"
+  [(set (match_operand:V64BITI 0 "register_operand"             "=vd,vd,vd,vd,vr,vr,vr,vr")
+  (unspec:V64BITI
+    [(unspec:V64BITI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand"     "vm,vm,vm,vm,J,J,J,J")
+       (minus:V64BITI
+        (match_operand:V64BITI 3 "register_operand"             "vr,vr,vr,vr,vr,vr,vr,vr")
+        (vec_duplicate:V64BITI
+          (sign_extend:<VSUB>
+           (match_operand:SI 4 "reg_or_neg_simm5_operand"       "r,Wn5,r,Wn5,r,Wn5,r,Wn5"))))
+       (match_operand:V64BITI 2 "vector_reg_or_const0_operand"  "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+     (match_operand:SI 5 "csr_operand"                          "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand:SI 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  {
+    const char *tail = satisfies_constraint_J (operands[1]) ? "" : ",%1.t";
+    char buf[64] = {0};
+    if (satisfies_constraint_Wn5 (operands[4]))
+      {
+        const char *insn = "vadd.vi\t%0,%3";
+        snprintf (buf, sizeof (buf), "%s,%d%s", insn, (int)(-INTVAL (operands[4])), tail);
+      }
+    else
+      {
+        const char *insn = "vsub.vx\t%0,%3,%4";
+        snprintf (buf, sizeof (buf), "%s%s", insn, tail);
+      }
+    output_asm_insn (buf, operands);
+    return "";
+  }
+  [(set_attr "type" "varith")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar and Vector-Immediate
+;; Integer Reverse Subtract: vrsub.vx and vrsub.vi.
+(define_insn "@vrsub<mode>_vx_internal"
+  [(set (match_operand:VI 0 "register_operand"              "=vd,vd,vd,vd,vr,vr,vr,vr")
+  (unspec:VI
+    [(unspec:VI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+       (minus:VI
+        (vec_duplicate:VI
+         (match_operand:<VSUB> 4 "reg_or_simm5_operand"     "r,Ws5,r,Ws5,r,Ws5,r,Ws5"))
+        (match_operand:VI 3 "register_operand"              "vr,vr,vr,vr,vr,vr,vr,vr"))
+       (match_operand:VI 2 "vector_reg_or_const0_operand"   "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vrsub.vx\t%0,%3,%4,%1.t
+   vrsub.vi\t%0,%3,%4,%1.t
+   vrsub.vx\t%0,%3,%4,%1.t
+   vrsub.vi\t%0,%3,%4,%1.t
+   vrsub.vx\t%0,%3,%4
+   vrsub.vi\t%0,%3,%4
+   vrsub.vx\t%0,%3,%4
+   vrsub.vi\t%0,%3,%4"
+  [(set_attr "type" "varith")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@vrsub<mode>_vx_32bit"
+  [(set (match_operand:V64BITI 0 "register_operand"             "=vd,vd,vd,vd,vr,vr,vr,vr")
+  (unspec:V64BITI
+    [(unspec:V64BITI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand"     "vm,vm,vm,vm,J,J,J,J")
+       (minus:V64BITI
+        (vec_duplicate:V64BITI
+         (sign_extend:<VSUB>
+          (match_operand:SI 4 "reg_or_simm5_operand"            "r,Ws5,r,Ws5,r,Ws5,r,Ws5")))
+        (match_operand:V64BITI 3 "register_operand"             "vr,vr,vr,vr,vr,vr,vr,vr"))
+       (match_operand:V64BITI 2 "vector_reg_or_const0_operand"  "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+     (match_operand:SI 5 "csr_operand"                          "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand:SI 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vrsub.vx\t%0,%3,%4,%1.t
+   vrsub.vi\t%0,%3,%4,%1.t
+   vrsub.vx\t%0,%3,%4,%1.t
+   vrsub.vi\t%0,%3,%4,%1.t
+   vrsub.vx\t%0,%3,%4
+   vrsub.vi\t%0,%3,%4
+   vrsub.vx\t%0,%3,%4
+   vrsub.vi\t%0,%3,%4"
+  [(set_attr "type" "varith")
+   (set_attr "mode" "<MODE>")])
+
+;; Pseudo-instruction: vneg.v vd,vs = vrsub.vx vd,vs,x0.
+(define_insn "@vneg<mode>_v"
+  [(set (match_operand:VI 0 "register_operand"                "=vd,vd,vr,vr")
+    (unspec:VI
+      [(unspec:VI
+        [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+         (neg:VI
+          (match_operand:VI 3 "register_operand"              "vr,vr,vr,vr"))
+         (match_operand:VI 2 "vector_reg_or_const0_operand"   "0,J,0,J")] UNSPEC_SELECT)
+       (match_operand 4 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK")
+       (match_operand 5 "const_int_operand")
+       (reg:SI VL_REGNUM)
+       (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vneg.v\t%0,%3,%1.t
+   vneg.v\t%0,%3,%1.t
+   vneg.v\t%0,%3
+   vneg.v\t%0,%3"
+  [(set_attr "type" "varith")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Widening Signed/Unsigned Integer Add/Subtract.
+(define_insn "@vw<plus_minus:optab><u><vw>_vv"
+  [(set (match_operand:<VW> 0 "register_operand"            "=&vr,&vr,&vr,&vr")
+  (unspec:<VW>
+    [(unspec:<VW>
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+       (plus_minus:<VW>
+        (any_extend:<VW>
+          (match_operand:VWI 3 "register_operand"           "vr,vr,vr,vr"))
+        (any_extend:<VW>
+          (match_operand:VWI 4 "register_operand"           "vr,vr,vr,vr")))
+       (match_operand:<VW> 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vw<insn><u>.vv\t%0,%3,%4,%1.t
+   vw<insn><u>.vv\t%0,%3,%4,%1.t
+   vw<insn><u>.vv\t%0,%3,%4
+   vw<insn><u>.vv\t%0,%3,%4"
+  [(set_attr "type" "vwarith")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Widening Signed/Unsigned Integer Add/Subtract.
+(define_insn "@vw<plus_minus:optab><u><vw>_vx"
+  [(set (match_operand:<VW> 0 "register_operand"            "=&vr,&vr,&vr,&vr,&vr,&vr,&vr,&vr")
+  (unspec:<VW>
+    [(unspec:<VW>
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+       (plus_minus:<VW>
+        (any_extend:<VW>
+          (match_operand:VWI 3 "register_operand"           "vr,vr,vr,vr,vr,vr,vr,vr"))
+        (any_extend:<VW>
+          (vec_duplicate:VWI
+            (match_operand:<VSUB> 4 "reg_or_0_operand"      "r,J,r,J,r,J,r,J"))))
+       (match_operand:<VW> 2 "vector_reg_or_const0_operand" "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vw<insn><u>.vx\t%0,%3,%4,%1.t
+   vw<insn><u>.vx\t%0,%3,zero,%1.t
+   vw<insn><u>.vx\t%0,%3,%4,%1.t
+   vw<insn><u>.vx\t%0,%3,zero,%1.t
+   vw<insn><u>.vx\t%0,%3,%4
+   vw<insn><u>.vx\t%0,%3,zero
+   vw<insn><u>.vx\t%0,%3,%4
+   vw<insn><u>.vx\t%0,%3,zero"
+  [(set_attr "type" "vwarith")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Widening Signed/Unsigned Integer Add/Subtract
+;; with a wide first operand (.wv form).
+(define_insn "@vw<plus_minus:optab><u><vw>_wv"
+  [(set (match_operand:<VW> 0 "register_operand"            "=&vr,&vr,&vr,&vr")
+  (unspec:<VW>
+    [(unspec:<VW>
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+       (plus_minus:<VW>
+        (match_operand:<VW> 3 "register_operand"            "vr,vr,vr,vr")
+        (any_extend:<VW>
+          (match_operand:VWI 4 "register_operand"           "vr,vr,vr,vr")))
+       (match_operand:<VW> 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vw<insn><u>.wv\t%0,%3,%4,%1.t
+   vw<insn><u>.wv\t%0,%3,%4,%1.t
+   vw<insn><u>.wv\t%0,%3,%4
+   vw<insn><u>.wv\t%0,%3,%4"
+  [(set_attr "type" "vwarith")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Widening Signed/Unsigned Integer Add/Subtract
+;; with a wide first operand (.wx form).
+(define_insn "@vw<plus_minus:optab><u><vw>_wx"
+  [(set (match_operand:<VW> 0 "register_operand"            "=&vr,&vr,&vr,&vr,&vr,&vr,&vr,&vr")
+  (unspec:<VW>
+    [(unspec:<VW>
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+       (plus_minus:<VW>
+        (match_operand:<VW> 3 "register_operand"            "vr,vr,vr,vr,vr,vr,vr,vr")
+        (any_extend:<VW>
+          (unspec:VWI
+             [(match_operand:<VSUB> 4 "reg_or_0_operand"    "r,J,r,J,r,J,r,J")
+            ] UNSPEC_VEC_DUPLICATE)))
+       (match_operand:<VW> 2 "vector_reg_or_const0_operand" "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vw<insn><u>.wx\t%0,%3,%4,%1.t
+   vw<insn><u>.wx\t%0,%3,zero,%1.t
+   vw<insn><u>.wx\t%0,%3,%4,%1.t
+   vw<insn><u>.wx\t%0,%3,zero,%1.t
+   vw<insn><u>.wx\t%0,%3,%4
+   vw<insn><u>.wx\t%0,%3,zero
+   vw<insn><u>.wx\t%0,%3,%4
+   vw<insn><u>.wx\t%0,%3,zero"
+  [(set_attr "type" "vwarith")
+   (set_attr "mode" "<MODE>")])
+
+;; Pseudo-instruction: vwcvt.x.x.v vd,vs,vm = vwadd.vx vd,vs,x0,vm.
+(define_insn "@vwcvt<u><vw>_x_x_v"
+  [(set (match_operand:<VW> 0 "register_operand"            "=&vr,&vr,&vr,&vr")
+  (unspec:<VW>
+    [(unspec:<VW>
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+       (unspec:<VW>
+         [(any_extend:<VW>
+           (match_operand:VWI 3 "register_operand"          "vr,vr,vr,vr"))] UNSPEC_DOUBLE_WIDEN)
+       (match_operand:<VW> 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+     (match_operand 4 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK")
+     (match_operand 5 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vwcvt<u>.x.x.v\t%0,%3,%1.t
+   vwcvt<u>.x.x.v\t%0,%3,%1.t
+   vwcvt<u>.x.x.v\t%0,%3
+   vwcvt<u>.x.x.v\t%0,%3"
+  [(set_attr "type" "vwcvt")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector Double-Widening Sign-extend and Zero-extend.
+(define_insn "@v<sz>ext<vw>_vf2"
+  [(set (match_operand:<VW> 0 "register_operand"            "=&vr,&vr,&vr,&vr")
+  (unspec:<VW>
+    [(unspec:<VW>
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+       (any_extend:<VW>
+        (match_operand:VWI 3 "register_operand"             "vr,vr,vr,vr"))
+       (match_operand:<VW> 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+     (match_operand 4 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK")
+     (match_operand 5 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   v<sz>ext.vf2\t%0,%3,%1.t
+   v<sz>ext.vf2\t%0,%3,%1.t
+   v<sz>ext.vf2\t%0,%3
+   v<sz>ext.vf2\t%0,%3"
+  [(set_attr "type" "vwcvt")
+   (set_attr "mode" "<VW>")])
+
+;; Vector Quad-Widening Sign-extend and Zero-extend.
+(define_insn "@v<sz>ext<vqw>_vf4"
+  [(set (match_operand:<VQW> 0 "register_operand"             "=&vr,&vr,&vr,&vr")
+  (unspec:<VQW>
+    [(unspec:<VQW>
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand"   "vm,vm,J,J")
+       (any_extend:<VQW>
+        (match_operand:VQWI 3 "register_operand"              "vr,vr,vr,vr"))
+       (match_operand:<VQW> 2 "vector_reg_or_const0_operand"  "0,J,0,J")] UNSPEC_SELECT)
+     (match_operand 4 "p_reg_or_const_csr_operand"            "rK,rK,rK,rK")
+     (match_operand 5 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   v<sz>ext.vf4\t%0,%3,%1.t
+   v<sz>ext.vf4\t%0,%3,%1.t
+   v<sz>ext.vf4\t%0,%3
+   v<sz>ext.vf4\t%0,%3"
+  [(set_attr "type" "vwcvt")
+   (set_attr "mode" "<VQW>")])
+
+;; Vector Oct-Widening Sign-extend and Zero-extend.
+(define_insn "@v<sz>ext<vow>_vf8"
+  [(set (match_operand:<VOW> 0 "register_operand"             "=&vr,&vr,&vr,&vr")
+  (unspec:<VOW>
+    [(unspec:<VOW>
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand"   "vm,vm,J,J")
+       (any_extend:<VOW>
+        (match_operand:VOWI 3 "register_operand"              "vr,vr,vr,vr"))
+       (match_operand:<VOW> 2 "vector_reg_or_const0_operand"  "0,J,0,J")] UNSPEC_SELECT)
+     (match_operand 4 "p_reg_or_const_csr_operand"            "rK,rK,rK,rK")
+     (match_operand 5 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   v<sz>ext.vf8\t%0,%3,%1.t
+   v<sz>ext.vf8\t%0,%3,%1.t
+   v<sz>ext.vf8\t%0,%3
+   v<sz>ext.vf8\t%0,%3"
+  [(set_attr "type" "vwcvt")
+   (set_attr "mode" "<VOW>")])
+
+;; Vector Integer Add-with-Carry/Subtract-with-Borrow Instructions
+;; For vadc and vsbc, the instruction encoding is reserved if the destination
+;; vector register is v0.
+;; Vector-Vector Produce sum with carry.
+(define_insn "@vadc<mode>_vvm"
+  [(set (match_operand:VI 0 "register_operand"          "=&vd,&vd,&vd,&vd")
+  (unspec:VI
+    [(match_operand:VI 1 "vector_reg_or_const0_operand" "0,0,J,J")
+     (plus:VI
+       (plus:VI
+         (match_operand:VI 2 "register_operand"         "vr,vr,vr,vr")
+         (match_operand:VI 3 "vector_arith_operand"     "vr,vi,vr,vi"))
+       (if_then_else:VI
+         (match_operand:<VM> 4 "register_operand"       "vm,vm,vm,vm")
+         (vec_duplicate:VI (const_int 1))
+         (vec_duplicate:VI (const_int 0))))
+     (match_operand 5 "p_reg_or_const_csr_operand"      "rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vadc.vvm\t%0,%2,%3,%4
+   vadc.vim\t%0,%2,%v3,%4
+   vadc.vvm\t%0,%2,%3,%4
+   vadc.vim\t%0,%2,%v3,%4"
+  [(set_attr "type" "vadc")
+   (set_attr "mode" "<MODE>")])
+
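+;; A usage sketch (pre-1.0 rvv-intrinsic naming assumed):
+;;   vint32m1_t sum = vadc_vvm_i32m1 (op1, op2, carryin, vl);
+;; emits vadc.vvm vd,vs2,vs1,v0; the "&vd" destination constraint keeps
+;; vd away from v0, since that encoding is reserved as noted above.
+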
+;; Vector-Vector Produce difference with borrow.
+(define_insn "@vsbc<mode>_vvm"
+  [(set (match_operand:VI 0 "register_operand"          "=&vd,&vd")
+  (unspec:VI
+    [(match_operand:VI 1 "vector_reg_or_const0_operand" "0,J")
+     (minus:VI
+       (minus:VI
+         (match_operand:VI 2 "register_operand"         "vr,vr")
+         (match_operand:VI 3 "register_operand"         "vr,vr"))
+       (if_then_else:VI
+         (match_operand:<VM> 4 "register_operand"       "vm,vm")
+         (vec_duplicate:VI (const_int 1))
+         (vec_duplicate:VI (const_int 0))))
+     (match_operand 5 "p_reg_or_const_csr_operand"      "rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "vsbc.vvm\t%0,%2,%3,%4"
+  [(set_attr "type" "vadc")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Produce sum with carry.
+(define_insn "@vadc<mode>_vxm_internal"
+  [(set (match_operand:VI 0 "register_operand"            "=&vd,&vd,&vd,&vd")
+  (unspec:VI
+    [(match_operand:VI 1 "vector_reg_or_const0_operand"   "0,0,J,J")
+      (plus:VI
+       (plus:VI
+         (match_operand:VI 2 "register_operand"           "vr,vr,vr,vr")
+         (vec_duplicate:VI
+           (match_operand:<VSUB> 3 "reg_or_simm5_operand" "r,Ws5,r,Ws5")))
+       (if_then_else:VI
+         (match_operand:<VM> 4 "register_operand"         "vm,vm,vm,vm")
+         (vec_duplicate:VI (const_int 1))
+         (vec_duplicate:VI (const_int 0))))
+     (match_operand 5 "p_reg_or_const_csr_operand"        "rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vadc.vxm\t%0,%2,%3,%4
+   vadc.vim\t%0,%2,%3,%4
+   vadc.vxm\t%0,%2,%3,%4
+   vadc.vim\t%0,%2,%3,%4"
+  [(set_attr "type" "vadc")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@vadc<mode>_vxm_32bit"
+  [(set (match_operand:V64BITI 0 "register_operand"           "=&vd,&vd,&vd,&vd")
+  (unspec:V64BITI
+    [(match_operand:V64BITI 1 "vector_reg_or_const0_operand"  "0,0,J,J")
+      (plus:V64BITI
+       (plus:V64BITI
+         (match_operand:V64BITI 2 "register_operand"          "vr,vr,vr,vr")
+         (vec_duplicate:V64BITI
+           (sign_extend:<VSUB>
+            (match_operand:SI 3 "reg_or_simm5_operand"        "r,Ws5,r,Ws5"))))
+       (if_then_else:V64BITI
+         (match_operand:<VM> 4 "register_operand"             "vm,vm,vm,vm")
+         (vec_duplicate:V64BITI (const_int 1))
+         (vec_duplicate:V64BITI (const_int 0))))
+     (match_operand:SI 5 "csr_operand"                        "rK,rK,rK,rK")
+     (match_operand:SI 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vadc.vxm\t%0,%2,%3,%4
+   vadc.vim\t%0,%2,%3,%4
+   vadc.vxm\t%0,%2,%3,%4
+   vadc.vim\t%0,%2,%3,%4"
+  [(set_attr "type" "vadc")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Produce difference with borrow.
+(define_insn "@vsbc<mode>_vxm_internal"
+  [(set (match_operand:VI 0 "register_operand"          "=&vd,&vd,&vd,&vd")
+  (unspec:VI
+    [(match_operand:VI 1 "vector_reg_or_const0_operand" "0,0,J,J")
+     (minus:VI
+       (minus:VI
+         (match_operand:VI 2 "register_operand"         "vr,vr,vr,vr")
+         (vec_duplicate:VI
+           (match_operand:<VSUB> 3 "reg_or_0_operand"   "r,J,r,J")))
+       (if_then_else:VI
+         (match_operand:<VM> 4 "register_operand"       "vm,vm,vm,vm")
+         (vec_duplicate:VI (const_int 1))
+         (vec_duplicate:VI (const_int 0))))
+     (match_operand 5 "p_reg_or_const_csr_operand"      "rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vsbc.vxm\t%0,%2,%3,%4
+   vsbc.vxm\t%0,%2,zero,%4
+   vsbc.vxm\t%0,%2,%3,%4
+   vsbc.vxm\t%0,%2,zero,%4"
+  [(set_attr "type" "vadc")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@vsbc<mode>_vxm_32bit"
+  [(set (match_operand:V64BITI 0 "register_operand"                   "=&vd,&vd,&vd,&vd")
+  (unspec:V64BITI
+    [(match_operand:V64BITI 1 "vector_reg_or_const0_operand"          "0,0,J,J")
+     (minus:V64BITI
+       (minus:V64BITI
+         (match_operand:V64BITI 2 "register_operand"                  "vr,vr,vr,vr")
+         (vec_duplicate:V64BITI
+           (sign_extend:<VSUB> (match_operand:SI 3 "reg_or_0_operand" "r,J,r,J"))))
+       (if_then_else:V64BITI
+         (match_operand:<VM> 4 "register_operand"                     "vm,vm,vm,vm")
+         (vec_duplicate:V64BITI (const_int 1))
+         (vec_duplicate:V64BITI (const_int 0))))
+     (match_operand:SI 5 "csr_operand"                                "rK,rK,rK,rK")
+     (match_operand:SI 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vsbc.vxm\t%0,%2,%3,%4
+   vsbc.vxm\t%0,%2,zero,%4
+   vsbc.vxm\t%0,%2,%3,%4
+   vsbc.vxm\t%0,%2,zero,%4"
+  [(set_attr "type" "vadc")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Produce carry out in mask register format.
+(define_insn "@vmadc<mode>_vvm"
+  [(set (match_operand:<VM> 0 "register_operand"    "=&vr,&vr")
+  (unspec:<VM>
+    [(plus:VI
+      (plus:VI
+        (match_operand:VI 1 "register_operand"      "vr,vr")
+        (match_operand:VI 2 "vector_arith_operand"  "vr,vi"))
+      (if_then_else:VI
+        (match_operand:<VM> 3 "register_operand"    "vm,vm")
+        (vec_duplicate:VI (const_int 1))
+        (vec_duplicate:VI (const_int 0))))
+     (match_operand 4 "p_reg_or_const_csr_operand"  "rK,rK")
+     (match_operand 5 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vmadc.vvm\t%0,%1,%2,%3
+   vmadc.vim\t%0,%1,%v2,%3"
+  [(set_attr "type" "vmadc")
+   (set_attr "mode" "<MODE>")])
+
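+;; A usage sketch for the carry-out form (pre-1.0 rvv-intrinsic naming
+;; assumed):
+;;   vbool32_t cout = vmadc_vvm_i32m1_b32 (op1, op2, carryin, vl);
+;; vmadc writes one carry-out bit per element, so a multi-word add chains
+;; vadc.vvm for the sums with vmadc.vvm for the next carries.
+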
+;; Vector-Vector Produce borrow out in mask register format.
+(define_insn "@vmsbc<mode>_vvm"
+  [(set (match_operand:<VM> 0 "register_operand"  "=&vr")
+  (unspec:<VM>
+    [(minus:VI
+      (minus:VI
+        (match_operand:VI 1 "register_operand"      "vr")
+        (match_operand:VI 2 "register_operand"      "vr"))
+      (if_then_else:VI
+        (match_operand:<VM> 3 "register_operand"    "vm")
+        (vec_duplicate:VI (const_int 1))
+        (vec_duplicate:VI (const_int 0))))
+     (match_operand 4 "p_reg_or_const_csr_operand"  "rK")
+     (match_operand 5 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "vmsbc.vvm\t%0,%1,%2,%3"
+  [(set_attr "type" "vmadc")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Produce carry out in mask register format.
+(define_insn "@vmadc<mode>_vxm_internal"
+  [(set (match_operand:<VM> 0 "register_operand"          "=&vr,&vr")
+  (unspec:<VM>
+    [(plus:VI
+      (plus:VI
+        (match_operand:VI 1 "register_operand"            "vr,vr")
+        (vec_duplicate:VI
+          (match_operand:<VSUB> 2 "reg_or_simm5_operand"  "r,Ws5")))
+      (if_then_else:VI
+        (match_operand:<VM> 3 "register_operand"          "vm,vm")
+        (vec_duplicate:VI (const_int 1))
+        (vec_duplicate:VI (const_int 0))))
+     (match_operand 4 "p_reg_or_const_csr_operand"        "rK,rK")
+     (match_operand 5 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vmadc.vxm\t%0,%1,%2,%3
+   vmadc.vim\t%0,%1,%2,%3"
+  [(set_attr "type" "vmadc")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@vmadc<mode>_vxm_32bit"
+  [(set (match_operand:<VM> 0 "register_operand"        "=&vr,&vr")
+  (unspec:<VM>
+    [(plus:V64BITI
+      (plus:V64BITI
+        (match_operand:V64BITI 1 "register_operand"     "vr,vr")
+        (vec_duplicate:V64BITI
+          (sign_extend:<VSUB>
+            (match_operand:SI 2 "reg_or_simm5_operand"  "r,Ws5"))))
+      (if_then_else:V64BITI
+        (match_operand:<VM> 3 "register_operand"        "vm,vm")
+        (vec_duplicate:V64BITI (const_int 1))
+        (vec_duplicate:V64BITI (const_int 0))))
+     (match_operand:SI 4 "csr_operand"                  "rK,rK")
+     (match_operand 5 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vmadc.vxm\t%0,%1,%2,%3
+   vmadc.vim\t%0,%1,%2,%3"
+  [(set_attr "type" "vmadc")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Produce borrow out in mask register format.
+(define_insn "@vmsbc<mode>_vxm_internal"
+  [(set (match_operand:<VM> 0 "register_operand"      "=&vr,&vr")
+  (unspec:<VM>
+    [(minus:VI
+      (minus:VI
+        (match_operand:VI 1 "register_operand"        "vr,vr")
+        (vec_duplicate:VI
+          (match_operand:<VSUB> 2 "reg_or_0_operand"  "r,J")))
+      (if_then_else:VI
+        (match_operand:<VM> 3 "register_operand"      "vm,vm")
+        (vec_duplicate:VI (const_int 1))
+        (vec_duplicate:VI (const_int 0))))
+     (match_operand 4 "p_reg_or_const_csr_operand"    "rK,rK")
+     (match_operand 5 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vmsbc.vxm\t%0,%1,%2,%3
+   vmsbc.vxm\t%0,%1,zero,%3"
+  [(set_attr "type" "vmadc")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@vmsbc<mode>_vxm_32bit"
+  [(set (match_operand:<VM> 0 "register_operand"    "=&vr,&vr")
+  (unspec:<VM>
+    [(minus:V64BITI
+      (minus:V64BITI
+        (match_operand:V64BITI 1 "register_operand" "vr,vr")
+        (vec_duplicate:V64BITI
+          (sign_extend:<VSUB>
+            (match_operand:SI 2 "reg_or_0_operand"  "r,J"))))
+      (if_then_else:V64BITI
+        (match_operand:<VM> 3 "register_operand"    "vm,vm")
+        (vec_duplicate:V64BITI (const_int 1))
+        (vec_duplicate:V64BITI (const_int 0))))
+     (match_operand:SI 4 "csr_operand"              "rK,rK")
+     (match_operand 5 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vmsbc.vxm\t%0,%1,%2,%3
+   vmsbc.vxm\t%0,%1,zero,%3"
+  [(set_attr "type" "vmadc")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Produce carry out in mask register format.
+(define_insn "@vmadc<mode>_vv"
+  [(set (match_operand:<VM> 0 "register_operand"    "=&vr,&vr")
+    (unspec:<VM>
+      [(plus:VI
+        (match_operand:VI 1 "register_operand"      "vr,vr")
+        (match_operand:VI 2 "vector_arith_operand"  "vr,vi"))
+      (match_operand 3 "p_reg_or_const_csr_operand" "rK,rK")
+      (match_operand 4 "const_int_operand")
+      (reg:SI VL_REGNUM)
+      (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vmadc.vv\t%0,%1,%2
+   vmadc.vi\t%0,%1,%v2"
+  [(set_attr "type" "vmadc")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Produce borrow out in mask register format.
+(define_insn "@vmsbc<mode>_vv"
+  [(set (match_operand:<VM> 0 "register_operand"    "=&vr")
+  (unspec:<VM>
+    [(minus:VI
+      (match_operand:VI 1 "register_operand"        "vr")
+      (match_operand:VI 2 "register_operand"        "vr"))
+     (match_operand 3 "p_reg_or_const_csr_operand"  "rK")
+     (match_operand 4 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "vmsbc.vv\t%0,%1,%2"
+  [(set_attr "type" "vmadc")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Produce carry out in mask register format.
+(define_insn "@vmadc<mode>_vx_internal"
+  [(set (match_operand:<VM> 0 "register_operand"          "=&vr,&vr")
+    (unspec:<VM>
+      [(plus:VI
+        (match_operand:VI 1 "register_operand"            "vr,vr")
+        (vec_duplicate:VI
+          (match_operand:<VSUB> 2 "reg_or_simm5_operand"  "r,Ws5")))
+       (match_operand 3 "p_reg_or_const_csr_operand"      "rK,rK")
+       (match_operand 4 "const_int_operand")
+       (reg:SI VL_REGNUM)
+       (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vmadc.vx\t%0,%1,%2
+   vmadc.vi\t%0,%1,%2"
+  [(set_attr "type" "varith")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@vmadc<mode>_vx_32bit"
+  [(set (match_operand:<VM> 0 "register_operand"        "=&vr,&vr")
+    (unspec:<VM>
+      [(plus:V64BITI
+        (match_operand:V64BITI 1 "register_operand"     "vr,vr")
+        (vec_duplicate:V64BITI
+          (sign_extend:<VSUB>
+            (match_operand:SI 2 "reg_or_simm5_operand"  "r,Ws5"))))
+       (match_operand:SI 3 "csr_operand"                "rK,rK")
+       (match_operand 4 "const_int_operand")
+       (reg:SI VL_REGNUM)
+       (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vmadc.vx\t%0,%1,%2
+   vmadc.vi\t%0,%1,%2"
+  [(set_attr "type" "varith")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Produce borrow out in mask register format.
+(define_insn "@vmsbc<mode>_vx_internal"
+  [(set (match_operand:<VM> 0 "register_operand"      "=&vr,&vr")
+    (unspec:<VM>
+      [(minus:VI
+        (match_operand:VI 1 "register_operand"        "vr,vr")
+        (vec_duplicate:VI
+          (match_operand:<VSUB> 2 "reg_or_0_operand"  "r,J")))
+       (match_operand 3 "p_reg_or_const_csr_operand"  "rK,rK")
+       (match_operand 4 "const_int_operand")
+       (reg:SI VL_REGNUM)
+       (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vmsbc.vx\t%0,%1,%2
+   vmsbc.vx\t%0,%1,zero"
+  [(set_attr "type" "varith")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@vmsbc<mode>_vx_32bit"
+  [(set (match_operand:<VM> 0 "register_operand"    "=&vr,&vr")
+    (unspec:<VM>
+      [(minus:V64BITI
+        (match_operand:V64BITI 1 "register_operand" "vr,vr")
+        (vec_duplicate:V64BITI
+          (sign_extend:<VSUB>
+            (match_operand:SI 2 "reg_or_0_operand"  "r,J"))))
+       (match_operand:SI 3 "csr_operand"            "rK,rK")
+       (match_operand 4 "const_int_operand")
+       (reg:SI VL_REGNUM)
+       (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vmsbc.vx\t%0,%1,%2
+   vmsbc.vx\t%0,%1,zero"
+  [(set_attr "type" "varith")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Bitwise logical operations.
+(define_insn "@v<optab><mode>_vv"
+  [(set (match_operand:VI 0 "register_operand"              "=vd,vd,vd,vd,vr,vr,vr,vr")
+  (unspec:VI
+    [(unspec:VI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+       (any_bitwise:VI
+         (match_operand:VI 3 "register_operand"             "vr,vr,vr,vr,vr,vr,vr,vr")
+         (match_operand:VI 4 "vector_arith_operand"         "vr,vi,vr,vi,vr,vi,vr,vi"))
+       (match_operand:VI 2 "vector_reg_or_const0_operand"   "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   v<insn>.vv\t%0,%3,%4,%1.t
+   v<insn>.vi\t%0,%3,%v4,%1.t
+   v<insn>.vv\t%0,%3,%4,%1.t
+   v<insn>.vi\t%0,%3,%v4,%1.t
+   v<insn>.vv\t%0,%3,%4
+   v<insn>.vi\t%0,%3,%v4
+   v<insn>.vv\t%0,%3,%4
+   v<insn>.vi\t%0,%3,%v4"
+  [(set_attr "type" "varith")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Bitwise logical operations.
+(define_insn "@v<optab><mode>_vx_internal"
+  [(set (match_operand:VI 0 "register_operand"              "=vd,vd,vd,vd,vr,vr,vr,vr")
+  (unspec:VI
+    [(unspec:VI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+       (any_bitwise:VI
+        (match_operand:VI 3 "register_operand"              "vr,vr,vr,vr,vr,vr,vr,vr")
+        (vec_duplicate:VI
+          (match_operand:<VSUB> 4 "reg_or_simm5_operand"    "r,Ws5,r,Ws5,r,Ws5,r,Ws5")))
+       (match_operand:VI 2 "vector_reg_or_const0_operand"   "0,0,J,J,0,0,J,J")
+      ] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   v<insn>.vx\t%0,%3,%4,%1.t
+   v<insn>.vi\t%0,%3,%4,%1.t
+   v<insn>.vx\t%0,%3,%4,%1.t
+   v<insn>.vi\t%0,%3,%4,%1.t
+   v<insn>.vx\t%0,%3,%4
+   v<insn>.vi\t%0,%3,%4
+   v<insn>.vx\t%0,%3,%4
+   v<insn>.vi\t%0,%3,%4"
+  [(set_attr "type" "vlogical")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@v<optab><mode>_vx_32bit"
+  [(set (match_operand:V64BITI 0 "register_operand"                       "=vd,vd,vd,vd,vr,vr,vr,vr")
+  (unspec:V64BITI
+    [(unspec:V64BITI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand"               "vm,vm,vm,vm,J,J,J,J")
+       (any_bitwise:V64BITI
+        (match_operand:V64BITI 3 "register_operand"                       "vr,vr,vr,vr,vr,vr,vr,vr")
+        (vec_duplicate:V64BITI
+          (sign_extend:<VSUB> (match_operand:SI 4 "reg_or_simm5_operand"  "r,Ws5,r,Ws5,r,Ws5,r,Ws5"))))
+       (match_operand:V64BITI 2 "vector_reg_or_const0_operand"            "0,0,J,J,0,0,J,J")
+      ] UNSPEC_SELECT)
+     (match_operand:SI 5 "csr_operand"                                    "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand:SI 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   v<insn>.vx\t%0,%3,%4,%1.t
+   v<insn>.vi\t%0,%3,%4,%1.t
+   v<insn>.vx\t%0,%3,%4,%1.t
+   v<insn>.vi\t%0,%3,%4,%1.t
+   v<insn>.vx\t%0,%3,%4
+   v<insn>.vi\t%0,%3,%4
+   v<insn>.vx\t%0,%3,%4
+   v<insn>.vi\t%0,%3,%4"
+  [(set_attr "type" "vlogical")
+   (set_attr "mode" "<MODE>")])
+
+;; pseudo-instruction vnot.v vd,vs,vm = vxor.vi vd,vs,-1,vm.
+(define_insn "@vnot<mode>_v"
+  [(set (match_operand:VI 0 "register_operand"                "=vd,vd,vr,vr")
+    (unspec:VI
+      [(unspec:VI
+        [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+         (not:VI
+          (match_operand:VI 3 "register_operand"              "vr,vr,vr,vr"))
+         (match_operand:VI 2 "vector_reg_or_const0_operand"   "0,J,0,J")] UNSPEC_SELECT)
+       (match_operand 4 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK")
+       (match_operand 5 "const_int_operand")
+       (reg:SI VL_REGNUM)
+       (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vnot.v\t%0,%3,%1.t
+   vnot.v\t%0,%3,%1.t
+   vnot.v\t%0,%3
+   vnot.v\t%0,%3"
+  [(set_attr "type" "varith")
+   (set_attr "mode" "<MODE>")])
+
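+;; A usage sketch (pre-1.0 rvv-intrinsic naming assumed):
+;;   vint32m1_t res = vnot_v_i32m1 (src, vl);
+;; assembles to vnot.v, i.e. vxor.vi with immediate -1.
+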
+;; Vector-Vector Bit shift operations.
+(define_insn "@v<optab><mode>_vv"
+  [(set (match_operand:VI 0 "register_operand"              "=vd,vd,vd,vd,vr,vr,vr,vr")
+  (unspec:VI
+    [(unspec:VI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+       (any_shift:VI
+         (match_operand:VI 3 "register_operand"             "vr,vr,vr,vr,vr,vr,vr,vr")
+         (match_operand:VI 4 "vector_arith_operand"         "vr,vk,vr,vk,vr,vk,vr,vk"))
+       (match_operand:VI 2 "vector_reg_or_const0_operand"   "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   v<insn>.vv\t%0,%3,%4,%1.t
+   v<insn>.vi\t%0,%3,%v4,%1.t
+   v<insn>.vv\t%0,%3,%4,%1.t
+   v<insn>.vi\t%0,%3,%v4,%1.t
+   v<insn>.vv\t%0,%3,%4
+   v<insn>.vi\t%0,%3,%v4
+   v<insn>.vv\t%0,%3,%4
+   v<insn>.vi\t%0,%3,%v4"
+  [(set_attr "type" "varith")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Bit shift operations.
+(define_insn "@v<optab><mode>_vx"
+  [(set (match_operand:VI 0 "register_operand"              "=vd,vd,vd,vd,vr,vr,vr,vr")
+  (unspec:VI
+    [(unspec:VI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+       (any_shift:VI
+        (match_operand:VI 3 "register_operand"              "vr,vr,vr,vr,vr,vr,vr,vr")
+        (match_operand 4 "p_reg_or_uimm5_operand"           "r,K,r,K,r,K,r,K"))
+       (match_operand:VI 2 "vector_reg_or_const0_operand"   "0,0,J,J,0,0,J,J")
+      ] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   v<insn>.vx\t%0,%3,%4,%1.t
+   v<insn>.vi\t%0,%3,%4,%1.t
+   v<insn>.vx\t%0,%3,%4,%1.t
+   v<insn>.vi\t%0,%3,%4,%1.t
+   v<insn>.vx\t%0,%3,%4
+   v<insn>.vi\t%0,%3,%4
+   v<insn>.vx\t%0,%3,%4
+   v<insn>.vi\t%0,%3,%4"
+  [(set_attr "type" "vshift")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Narrowing Integer Right Shift Instructions.
+(define_insn "@vn<optab><mode>_wv"
+  [(set (match_operand:VWI 0 "register_operand"             "=vd,vd,&vd,vd,&vd,   vd,vd,&vd,vd,&vd,   vr,vr,&vr,vr,&vr,   vr,vr,&vr,vr,&vr")
+  (unspec:VWI
+    [(unspec:VWI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,vm,      vm,vm,vm,vm,vm,     J,J,J,J,J,          J,J,J,J,J")
+       (truncate:VWI
+         (any_shiftrt:<VW>
+           (match_operand:<VW> 3 "register_operand"         "0,vr,vr,0,vr,        0,vr,vr,0,vr,       0,vr,vr,0,vr,       0,vr,vr,0,vr")
+           (match_operand:VWI 4 "vector_shift_operand"      "vr,0,vr,vk,vk,       vr,0,vr,vk,vk,      vr,0,vr,vk,vk,      vr,0,vr,vk,vk")))
+       (match_operand:VWI 2 "vector_reg_or_const0_operand"  "0,0,0,0,0,           J,J,J,J,J,          0,0,0,0,0,          J,J,J,J,J")] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK,rK,      rK,rK,rK,rK,rK,     rK,rK,rK,rK,rK,     rK,rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vn<insn>.wv\t%0,%3,%4,%1.t
+   vn<insn>.wv\t%0,%3,%4,%1.t
+   vn<insn>.wv\t%0,%3,%4,%1.t
+   vn<insn>.wi\t%0,%3,%v4,%1.t
+   vn<insn>.wi\t%0,%3,%v4,%1.t
+   vn<insn>.wv\t%0,%3,%4,%1.t
+   vn<insn>.wv\t%0,%3,%4,%1.t
+   vn<insn>.wv\t%0,%3,%4,%1.t
+   vn<insn>.wi\t%0,%3,%v4,%1.t
+   vn<insn>.wi\t%0,%3,%v4,%1.t
+   vn<insn>.wv\t%0,%3,%4
+   vn<insn>.wv\t%0,%3,%4
+   vn<insn>.wv\t%0,%3,%4
+   vn<insn>.wi\t%0,%3,%v4
+   vn<insn>.wi\t%0,%3,%v4
+   vn<insn>.wv\t%0,%3,%4
+   vn<insn>.wv\t%0,%3,%4
+   vn<insn>.wv\t%0,%3,%4
+   vn<insn>.wi\t%0,%3,%v4
+   vn<insn>.wi\t%0,%3,%v4"
+  [(set_attr "type" "vshift")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Narrowing Integer Right Shift Instructions.
+(define_insn "@vn<optab><mode>_wx"
+  [(set (match_operand:VWI 0 "register_operand"             "=vd,&vd,vd,&vd,  vd,&vd,vd,&vd,  vr,&vr,vr,&vr,  vr,&vr,vr,&vr")
+  (unspec:VWI
+    [(unspec:VWI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,     vm,vm,vm,vm,    J,J,J,J,        J,J,J,J")
+        (truncate:VWI
+        (any_shiftrt:<VW>
+          (match_operand:<VW> 3 "register_operand"          "0,vr,0,vr,       0,vr,0,vr,      0,vr,0,vr,      0,vr,0,vr")
+          (match_operand 4 "p_reg_or_uimm5_operand"         "r,r,K,K,         r,r,K,K,        r,r,K,K,        r,r,K,K")))
+       (match_operand:VWI 2 "vector_reg_or_const0_operand"  "0,0,0,0,         J,J,J,J,        0,0,0,0,        J,J,J,J")
+      ] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK,     rK,rK,rK,rK,    rK,rK,rK,rK,    rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vn<insn>.wx\t%0,%3,%4,%1.t
+   vn<insn>.wx\t%0,%3,%4,%1.t
+   vn<insn>.wi\t%0,%3,%4,%1.t
+   vn<insn>.wi\t%0,%3,%4,%1.t
+   vn<insn>.wx\t%0,%3,%4,%1.t
+   vn<insn>.wx\t%0,%3,%4,%1.t
+   vn<insn>.wi\t%0,%3,%4,%1.t
+   vn<insn>.wi\t%0,%3,%4,%1.t
+   vn<insn>.wx\t%0,%3,%4
+   vn<insn>.wx\t%0,%3,%4
+   vn<insn>.wi\t%0,%3,%4
+   vn<insn>.wi\t%0,%3,%4
+   vn<insn>.wx\t%0,%3,%4
+   vn<insn>.wx\t%0,%3,%4
+   vn<insn>.wi\t%0,%3,%4
+   vn<insn>.wi\t%0,%3,%4"
+  [(set_attr "type" "vshift")
+   (set_attr "mode" "<MODE>")])
+
+;; pseudo-instruction vncvt.x.x.w vd,vs,vm = vnsrl.wx vd,vs,x0,vm.
+(define_insn "@vncvt<mode>_x_x_w"
+  [(set (match_operand:VWI 0 "register_operand"               "=vd,&vd,   vd,&vd,   vr,&vr,   vr,&vr")
+  (unspec:VWI
+    [(unspec:VWI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand"   "vm,vm,     vm,vm,    J,J,      J,J")
+       (truncate:VWI
+        (match_operand:<VW> 3 "register_operand"              "0,vr,      0,vr,     0,vr,     0,vr"))
+       (match_operand:VWI 2 "vector_reg_or_const0_operand"    "0,0,       J,J,      0,0,      J,J")
+      ] UNSPEC_SELECT)
+     (match_operand 4 "p_reg_or_const_csr_operand"            "rK,rK,     rK,rK,    rK,rK,    rK,rK")
+     (match_operand 5 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vncvt.x.x.w\t%0,%3,%1.t
+   vncvt.x.x.w\t%0,%3,%1.t
+   vncvt.x.x.w\t%0,%3,%1.t
+   vncvt.x.x.w\t%0,%3,%1.t
+   vncvt.x.x.w\t%0,%3
+   vncvt.x.x.w\t%0,%3
+   vncvt.x.x.w\t%0,%3
+   vncvt.x.x.w\t%0,%3"
+  [(set_attr "type" "vshift")
+   (set_attr "mode" "<MODE>")])
+
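+;; A usage sketch (pre-1.0 rvv-intrinsic naming assumed):
+;;   vint32m1_t res = vncvt_x_x_w_i32m1 (wide, vl);  /* wide: vint64m2_t */
+;; assembles to vncvt.x.x.w (encoded as vnsrl.wx with rs1 = x0).
+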
+;; Vector-Vector Integer Comparison Instructions.
+(define_insn "@vms<optab><mode>_vv"
+  [(set (match_operand:<VM> 0 "register_operand"              "=vr,vr,vm,&vr,vr,vm,&vr,   vr,vr,vm,&vr,vr,vm,&vr,   vr,vr,&vr,vr,&vr")
+    (unspec:<VM>
+      [(unspec:<VM>
+        [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,0,vm,vm,0,vm,        vm,vm,0,vm,vm,0,vm,       J,J,J,J,J")
+         (cmp_noltge:<VM>
+           (match_operand:VI 3 "register_operand"             "0,vr,vr,vr,0,vr,vr,        0,vr,vr,vr,0,vr,vr,       0,vr,vr,0,vr")
+           (match_operand:VI 4 "vector_arith_operand"         "vr,0,vr,vr,vi,vi,vi,       vr,0,vr,vr,vi,vi,vi,      vr,0,vr,vi,vi"))
+         (match_operand:<VM> 2 "vector_reg_or_const0_operand" "0,0,0,0,0,0,0,             J,J,J,J,J,J,J,            J,J,J,J,J")
+        ] UNSPEC_SELECT)
+       (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK,rK,rK,rK,      rK,rK,rK,rK,rK,rK,rK,     rK,rK,rK,rK,rK")
+       (match_operand 6 "const_int_operand")
+       (reg:SI VL_REGNUM)
+       (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vms<insn>.vv\t%0,%3,%4,%1.t
+   vms<insn>.vv\t%0,%3,%4,%1.t
+   vms<insn>.vv\t%0,%3,%4,%1.t
+   vms<insn>.vv\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%v4,%1.t
+   vms<insn>.vi\t%0,%3,%v4,%1.t
+   vms<insn>.vi\t%0,%3,%v4,%1.t
+   vms<insn>.vv\t%0,%3,%4,%1.t
+   vms<insn>.vv\t%0,%3,%4,%1.t
+   vms<insn>.vv\t%0,%3,%4,%1.t
+   vms<insn>.vv\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%v4,%1.t
+   vms<insn>.vi\t%0,%3,%v4,%1.t
+   vms<insn>.vi\t%0,%3,%v4,%1.t
+   vms<insn>.vv\t%0,%3,%4
+   vms<insn>.vv\t%0,%3,%4
+   vms<insn>.vv\t%0,%3,%4
+   vms<insn>.vi\t%0,%3,%v4
+   vms<insn>.vi\t%0,%3,%v4"
+  [(set_attr "type" "vcmp")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@vms<optab><mode>_vv"
+  [(set (match_operand:<VM> 0 "register_operand"                "=vr,vr,vm,&vr,vr,vm,&vr,   vr,vr,vm,&vr,vr,vm,&vr,   vr,vr,&vr,vr,&vr")
+    (unspec:<VM>
+      [(unspec:<VM>
+        [(match_operand:<VM> 1 "vector_reg_or_const0_operand"   "vm,vm,0,vm,vm,0,vm,        vm,vm,0,vm,vm,0,vm,       J,J,J,J,J")
+         (cmp_ltge:<VM>
+           (match_operand:VI 3 "register_operand"               "0,vr,vr,vr,0,vr,vr,        0,vr,vr,vr,0,vr,vr,       0,vr,vr,0,vr")
+           (match_operand:VI 4 "vector_neg_arith_operand"       "vr,0,vr,vr,vj,vj,vj,       vr,0,vr,vr,vj,vj,vj,      vr,0,vr,vj,vj"))
+         (match_operand:<VM> 2 "vector_reg_or_const0_operand"   "0,0,0,0,0,0,0,             J,J,J,J,J,J,J,            J,J,J,J,J")
+        ] UNSPEC_SELECT)
+       (match_operand 5 "p_reg_or_const_csr_operand"            "rK,rK,rK,rK,rK,rK,rK,      rK,rK,rK,rK,rK,rK,rK,     rK,rK,rK,rK,rK")
+       (match_operand 6 "const_int_operand")
+       (reg:SI VL_REGNUM)
+       (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vms<insn>.vv\t%0,%3,%4,%1.t
+   vms<insn>.vv\t%0,%3,%4,%1.t
+   vms<insn>.vv\t%0,%3,%4,%1.t
+   vms<insn>.vv\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%v4,%1.t
+   vms<insn>.vi\t%0,%3,%v4,%1.t
+   vms<insn>.vi\t%0,%3,%v4,%1.t
+   vms<insn>.vv\t%0,%3,%4,%1.t
+   vms<insn>.vv\t%0,%3,%4,%1.t
+   vms<insn>.vv\t%0,%3,%4,%1.t
+   vms<insn>.vv\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%v4,%1.t
+   vms<insn>.vi\t%0,%3,%v4,%1.t
+   vms<insn>.vi\t%0,%3,%v4,%1.t
+   vms<insn>.vv\t%0,%3,%4
+   vms<insn>.vv\t%0,%3,%4
+   vms<insn>.vv\t%0,%3,%4
+   vms<insn>.vi\t%0,%3,%v4
+   vms<insn>.vi\t%0,%3,%v4"
+  [(set_attr "type" "vcmp")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Integer Comparison Instructions.
+(define_insn "@vms<optab><mode>_vx_internal"
+  [(set (match_operand:<VM> 0 "register_operand"                "=vr,vm,&vr,vr,vm,&vr,  vr,vm,&vr,vr,vm,&vr,  vr,&vr,vr,&vr")
+    (unspec:<VM>
+      [(unspec:<VM>
+        [(match_operand:<VM> 1 "vector_reg_or_const0_operand"   "vm,0,vm,vm,0,vm,       vm,0,vm,vm,0,vm,      J,J,J,J")
+         (cmp_noltge:<VM>
+           (match_operand:VI 3 "register_operand"               "0,vr,vr,0,vr,vr,       0,vr,vr,0,vr,vr,      0,vr,0,vr")
+           (vec_duplicate:VI
+             (match_operand:<VSUB> 4 "reg_or_simm5_operand"     "r,r,r,Ws5,Ws5,Ws5,     r,r,r,Ws5,Ws5,Ws5,    r,r,Ws5,Ws5")))
+         (match_operand:<VM> 2 "vector_reg_or_const0_operand"   "0,0,0,0,0,0,           J,J,J,J,J,J,          J,J,J,J")
+        ] UNSPEC_SELECT)
+       (match_operand 5 "p_reg_or_const_csr_operand"            "rK,rK,rK,rK,rK,rK,     rK,rK,rK,rK,rK,rK,    rK,rK,rK,rK")
+       (match_operand 6 "const_int_operand")
+       (reg:SI VL_REGNUM)
+       (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4
+   vms<insn>.vx\t%0,%3,%4
+   vms<insn>.vi\t%0,%3,%4
+   vms<insn>.vi\t%0,%3,%4"
+  [(set_attr "type" "vcmp")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@vms<optab><mode>_vx_32bit"
+  [(set (match_operand:<VM> 0 "register_operand"                            "=vr,vm,&vr,vr,vm,&vr,  vr,vm,&vr,vr,vm,&vr,  vr,&vr,vr,&vr")
+    (unspec:<VM>
+      [(unspec:<VM>
+        [(match_operand:<VM> 1 "vector_reg_or_const0_operand"               "vm,0,vm,vm,0,vm,       vm,0,vm,vm,0,vm,      J,J,J,J")
+         (cmp_noltge:<VM>
+           (match_operand:V64BITI 3 "register_operand"                      "0,vr,vr,0,vr,vr,       0,vr,vr,0,vr,vr,      0,vr,0,vr")
+           (vec_duplicate:V64BITI
+             (sign_extend:<VSUB> (match_operand:SI 4 "reg_or_simm5_operand" "r,r,r,Ws5,Ws5,Ws5,     r,r,r,Ws5,Ws5,Ws5,    r,r,Ws5,Ws5"))))
+         (match_operand:<VM> 2 "vector_reg_or_const0_operand"               "0,0,0,0,0,0,           J,J,J,J,J,J,          J,J,J,J")
+        ] UNSPEC_SELECT)
+       (match_operand:SI 5 "csr_operand"                                    "rK,rK,rK,rK,rK,rK,     rK,rK,rK,rK,rK,rK,    rK,rK,rK,rK")
+       (match_operand 6 "const_int_operand")
+       (reg:SI VL_REGNUM)
+       (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4
+   vms<insn>.vx\t%0,%3,%4
+   vms<insn>.vi\t%0,%3,%4
+   vms<insn>.vi\t%0,%3,%4"
+  [(set_attr "type" "vcmp")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@vms<optab><mode>_vx_internal"
+  [(set (match_operand:<VM> 0 "register_operand"                  "=vr,vm,&vr,vr,vm,&vr,  vr,vm,&vr,vr,vm,&vr,  vr,&vr,vr,&vr")
+    (unspec:<VM>
+      [(unspec:<VM>
+        [(match_operand:<VM> 1 "vector_reg_or_const0_operand"     "vm,0,vm,vm,0,vm,       vm,0,vm,vm,0,vm,      J,J,J,J")
+         (cmp_lt:<VM>
+           (match_operand:VI 3 "register_operand"                 "0,vr,vr,0,vr,vr,       0,vr,vr,0,vr,vr,      0,vr,0,vr")
+           (vec_duplicate:VI
+             (match_operand:<VSUB> 4 "reg_or_neg_simm5_operand"   "r,r,r,Wn5,Wn5,Wn5,     r,r,r,Wn5,Wn5,Wn5,    r,r,Wn5,Wn5")))
+         (match_operand:<VM> 2 "vector_reg_or_const0_operand"     "0,0,0,0,0,0,           J,J,J,J,J,J,          J,J,J,J")
+        ] UNSPEC_SELECT)
+       (match_operand 5 "p_reg_or_const_csr_operand"              "rK,rK,rK,rK,rK,rK,     rK,rK,rK,rK,rK,rK,    rK,rK,rK,rK")
+       (match_operand 6 "const_int_operand")
+       (reg:SI VL_REGNUM)
+       (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4
+   vms<insn>.vx\t%0,%3,%4
+   vms<insn>.vi\t%0,%3,%4
+   vms<insn>.vi\t%0,%3,%4"
+  [(set_attr "type" "vcmp")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@vms<optab><mode>_vx_32bit"
+  [(set (match_operand:<VM> 0 "register_operand"                                "=vr,vm,&vr,vr,vm,&vr,  vr,vm,&vr,vr,vm,&vr,  vr,&vr,vr,&vr")
+    (unspec:<VM>
+      [(unspec:<VM>
+        [(match_operand:<VM> 1 "vector_reg_or_const0_operand"                   "vm,0,vm,vm,0,vm,       vm,0,vm,vm,0,vm,      J,J,J,J")
+         (cmp_lt:<VM>
+           (match_operand:V64BITI 3 "register_operand"                          "0,vr,vr,0,vr,vr,       0,vr,vr,0,vr,vr,      0,vr,0,vr")
+           (vec_duplicate:V64BITI
+             (sign_extend:<VSUB> (match_operand:SI 4 "reg_or_neg_simm5_operand" "r,r,r,Wn5,Wn5,Wn5,     r,r,r,Wn5,Wn5,Wn5,    r,r,Wn5,Wn5"))))
+         (match_operand:<VM> 2 "vector_reg_or_const0_operand"                   "0,0,0,0,0,0,           J,J,J,J,J,J,          J,J,J,J")
+        ] UNSPEC_SELECT)
+       (match_operand:SI 5 "csr_operand"                                        "rK,rK,rK,rK,rK,rK,     rK,rK,rK,rK,rK,rK,    rK,rK,rK,rK")
+       (match_operand 6 "const_int_operand")
+       (reg:SI VL_REGNUM)
+       (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4
+   vms<insn>.vx\t%0,%3,%4
+   vms<insn>.vi\t%0,%3,%4
+   vms<insn>.vi\t%0,%3,%4"
+  [(set_attr "type" "vcmp")
+   (set_attr "mode" "<MODE>")])
+
+(define_expand "@vms<optab><mode>_vx_internal"
+  [(parallel
+    [(set (match_operand:<VM> 0 "register_operand")
+      (unspec:<VM>
+        [(unspec:<VM>
+          [(match_operand:<VM> 1 "vector_reg_or_const0_operand")
+           (cmp_ge:<VM>
+             (match_operand:VI 3 "register_operand")
+             (vec_duplicate:VI
+               (match_operand:<VSUB> 4 "reg_or_neg_simm5_operand")))
+           (match_operand:<VM> 2 "vector_reg_or_const0_operand")] UNSPEC_SELECT)
+          (match_operand 5 "p_reg_or_const_csr_operand")
+          (match_operand 6 "const_int_operand")
+          (reg:SI VL_REGNUM)
+          (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))
+     (clobber (scratch:<VM>))])]
+  "TARGET_VECTOR"
+  {
+  })
+
+(define_expand "@vms<optab><mode>_vx_32bit"
+  [(parallel
+    [(set (match_operand:<VM> 0 "register_operand")
+      (unspec:<VM>
+        [(unspec:<VM>
+          [(match_operand:<VM> 1 "vector_reg_or_const0_operand")
+           (cmp_ge:<VM>
+             (match_operand:V64BITI 3 "register_operand")
+             (vec_duplicate:V64BITI
+               (sign_extend:<VSUB> (match_operand:SI 4 "reg_or_neg_simm5_operand"))))
+           (match_operand:<VM> 2 "vector_reg_or_const0_operand")] UNSPEC_SELECT)
+          (match_operand:SI 5 "csr_operand")
+          (match_operand 6 "const_int_operand")
+          (reg:SI VL_REGNUM)
+          (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))
+     (clobber (scratch:<VM>))])]
+  "TARGET_VECTOR"
+  {
+  })
+
+;; When the destination EEW is smaller than the source EEW, the destination
+;; register group may overlap the source register group only in the
+;; lowest-numbered part of the source group, per the vector spec (section
+;; 5.2, Vector Operands).  The affected instructions are the integer and
+;; floating-point compare instructions, the narrowing integer right shift
+;; instructions, and the narrowing fixed-point clip instructions.
+;; So change case 1 to case 2 to allow the overlap, and give the overlap
+;; alternatives higher priority.
+;;    case 1:                   case 2:
+;;    match_operand 0 &vr       match_operand 0 vr, ?&vr
+;;    match_operand 3 vr        match_operand 3 0,  vr
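+;; For example, masked vmsge.vx with vd == v0 is expanded by the assembler
+;; through a temporary (the spec's vmsge.vx vd,va,x,v0.t,vt pseudo form);
+;; alternative 1 below passes the scratch operand as %7 for that case.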
+(define_insn "*vms<optab><mode>_vx"
+  [(set (match_operand:<VM> 0 "register_operand"                "=vm, vd,&vd,vd,&vd,  &vd,vd,vd,&vd,  vr,&vr,vr,&vr")
+    (unspec:<VM>
+      [(unspec:<VM>
+        [(match_operand:<VM> 1 "vector_reg_or_const0_operand"   "0,   vm,vm,vm,vm,    vm,vm,vm,vm,    J,J,J,J")
+         (cmp_ge:<VM>
+           (match_operand:VI 3 "register_operand"               "vr,  0,vr,0,vr,      vr,0,0,vr,      0,vr,0,vr")
+           (vec_duplicate:VI
+             (match_operand:<VSUB> 4 "reg_or_neg_simm5_operand" "r,   r,r,Wn5,Wn5,    r,r,Wn5,Wn5,    r,r,Wn5,Wn5")))
+         (match_operand:<VM> 2 "vector_reg_or_const0_operand"   "0,   0,0,0,0,        J,J,J,J,        J,J,J,J")
+        ] UNSPEC_SELECT)
+      (match_operand 5 "p_reg_or_const_csr_operand"             "rK,  rK,rK,rK,rK,    rK,rK,rK,rK,    rK,rK,rK,rK")
+      (match_operand 6 "const_int_operand")
+      (reg:SI VL_REGNUM)
+      (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))
+   (clobber (match_scratch:<VM> 7                               "=&vr,X,X,X,X,        X,X,X,X,        X,X,X,X"))]
+  "TARGET_VECTOR"
+  "@
+   vms<insn>.vx\t%0,%3,%4,%1.t,%7
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4
+   vms<insn>.vx\t%0,%3,%4
+   vms<insn>.vi\t%0,%3,%4
+   vms<insn>.vi\t%0,%3,%4"
+  [(set_attr "type" "vcmp")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "*vms<optab><mode>_vx_32bit"
+  [(set (match_operand:<VM> 0 "register_operand"                                "=vm, vd,&vd,vd,&vd,  &vd,vd,vd,&vd,  vr,&vr,vr,&vr")
+    (unspec:<VM>
+      [(unspec:<VM>
+        [(match_operand:<VM> 1 "vector_reg_or_const0_operand"                   "0,   vm,vm,vm,vm,    vm,vm,vm,vm,    J,J,J,J")
+         (cmp_ge:<VM>
+           (match_operand:V64BITI 3 "register_operand"                          "vr,  0,vr,0,vr,      vr,0,0,vr,      0,vr,0,vr")
+           (vec_duplicate:V64BITI
+             (sign_extend:<VSUB> (match_operand:SI 4 "reg_or_neg_simm5_operand" "r,   r,r,Wn5,Wn5,    r,r,Wn5,Wn5,    r,r,Wn5,Wn5"))))
+         (match_operand:<VM> 2 "vector_reg_or_const0_operand"                   "0,   0,0,0,0,        J,J,J,J,        J,J,J,J")
+        ] UNSPEC_SELECT)
+      (match_operand 5 "p_reg_or_const_csr_operand"                             "rK,  rK,rK,rK,rK,    rK,rK,rK,rK,    rK,rK,rK,rK")
+      (match_operand 6 "const_int_operand")
+      (reg:SI VL_REGNUM)
+      (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))
+   (clobber (match_scratch:<VM> 7                                               "=&vr,X,X,X,X,        X,X,X,X,        X,X,X,X"))]
+  "TARGET_VECTOR"
+  "@
+   vms<insn>.vx\t%0,%3,%4,%1.t,%7
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vi\t%0,%3,%4,%1.t
+   vms<insn>.vx\t%0,%3,%4
+   vms<insn>.vx\t%0,%3,%4
+   vms<insn>.vi\t%0,%3,%4
+   vms<insn>.vi\t%0,%3,%4"
+  [(set_attr "type" "vcmp")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Integer Signed/Unsigned Minimum/Maximum.
+(define_insn "@v<optab><mode>_vv"
+  [(set (match_operand:VI 0 "register_operand"              "=vd,vd,vr,vr")
+  (unspec:VI
+    [(unspec:VI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+       (any_minmax:VI
+         (match_operand:VI 3 "register_operand"             "vr,vr,vr,vr")
+         (match_operand:VI 4 "register_operand"             "vr,vr,vr,vr"))
+       (match_operand:VI 2 "vector_reg_or_const0_operand"   "0,J,0,J")
+      ] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   v<insn>.vv\t%0,%3,%4,%1.t
+   v<insn>.vv\t%0,%3,%4,%1.t
+   v<insn>.vv\t%0,%3,%4
+   v<insn>.vv\t%0,%3,%4"
+  [(set_attr "type" "varith")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Signed/Unsigned min/max.
+(define_insn "@v<optab><mode>_vx_internal"
+  [(set (match_operand:VI 0 "register_operand"              "=vd,vd,vd,vd,vr,vr,vr,vr")
+  (unspec:VI
+    [(unspec:VI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+       (any_minmax:VI
+        (match_operand:VI 3 "register_operand"              "vr,vr,vr,vr,vr,vr,vr,vr")
+        (vec_duplicate:VI
+          (match_operand:<VSUB> 4 "reg_or_0_operand"        "r,J,r,J,r,J,r,J")))
+       (match_operand:VI 2 "vector_reg_or_const0_operand"   "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   v<insn>.vx\t%0,%3,%4,%1.t
+   v<insn>.vx\t%0,%3,zero,%1.t
+   v<insn>.vx\t%0,%3,%4,%1.t
+   v<insn>.vx\t%0,%3,zero,%1.t
+   v<insn>.vx\t%0,%3,%4
+   v<insn>.vx\t%0,%3,zero
+   v<insn>.vx\t%0,%3,%4
+   v<insn>.vx\t%0,%3,zero"
+  [(set_attr "type" "varith")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@v<optab><mode>_vx_32bit"
+  [(set (match_operand:V64BITI 0 "register_operand"                   "=vd,vd,vd,vd,vr,vr,vr,vr")
+  (unspec:V64BITI
+    [(unspec:V64BITI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand"           "vm,vm,vm,vm,J,J,J,J")
+       (any_minmax:V64BITI
+        (match_operand:V64BITI 3 "register_operand"                   "vr,vr,vr,vr,vr,vr,vr,vr")
+        (vec_duplicate:V64BITI
+          (sign_extend:<VSUB> (match_operand:SI 4 "reg_or_0_operand"  "r,J,r,J,r,J,r,J"))))
+       (match_operand:V64BITI 2 "vector_reg_or_const0_operand"        "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+     (match_operand:SI 5 "csr_operand"                                "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand:SI 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   v<insn>.vx\t%0,%3,%4,%1.t
+   v<insn>.vx\t%0,%3,zero,%1.t
+   v<insn>.vx\t%0,%3,%4,%1.t
+   v<insn>.vx\t%0,%3,zero,%1.t
+   v<insn>.vx\t%0,%3,%4
+   v<insn>.vx\t%0,%3,zero
+   v<insn>.vx\t%0,%3,%4
+   v<insn>.vx\t%0,%3,zero"
+  [(set_attr "type" "varith")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Signed multiply, returning low bits of product.
+(define_insn "@vmul<mode>_vv"
+  [(set (match_operand:VI 0 "register_operand"              "=vd,vd,vr,vr")
+  (unspec:VI
+    [(unspec:VI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+       (mult:VI
+         (match_operand:VI 3 "register_operand"             "vr,vr,vr,vr")
+         (match_operand:VI 4 "register_operand"             "vr,vr,vr,vr"))
+       (match_operand:VI 2 "vector_reg_or_const0_operand"   "0,J,0,J")] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vmul.vv\t%0,%3,%4,%1.t
+   vmul.vv\t%0,%3,%4,%1.t
+   vmul.vv\t%0,%3,%4
+   vmul.vv\t%0,%3,%4"
+  [(set_attr "type" "varith")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Signed multiply, returning low bits of product.
+(define_insn "@vmul<mode>_vx_internal"
+  [(set (match_operand:VI 0 "register_operand"              "=vd,vd,vd,vd,vr,vr,vr,vr")
+  (unspec:VI
+    [(unspec:VI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+       (mult:VI
+        (match_operand:VI 3 "register_operand"              "vr,vr,vr,vr,vr,vr,vr,vr")
+        (vec_duplicate:VI
+          (match_operand:<VSUB> 4 "reg_or_0_operand"        "r,J,r,J,r,J,r,J")))
+       (match_operand:VI 2 "vector_reg_or_const0_operand"   "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vmul.vx\t%0,%3,%4,%1.t
+   vmul.vx\t%0,%3,zero,%1.t
+   vmul.vx\t%0,%3,%4,%1.t
+   vmul.vx\t%0,%3,zero,%1.t
+   vmul.vx\t%0,%3,%4
+   vmul.vx\t%0,%3,zero
+   vmul.vx\t%0,%3,%4
+   vmul.vx\t%0,%3,zero"
+  [(set_attr "type" "vmul")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@vmul<mode>_vx_32bit"
+  [(set (match_operand:V64BITI 0 "register_operand"                   "=vd,vd,vd,vd,vr,vr,vr,vr")
+  (unspec:V64BITI
+    [(unspec:V64BITI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand"           "vm,vm,vm,vm,J,J,J,J")
+       (mult:V64BITI
+        (match_operand:V64BITI 3 "register_operand"                   "vr,vr,vr,vr,vr,vr,vr,vr")
+        (vec_duplicate:V64BITI
+          (sign_extend:<VSUB> (match_operand:SI 4 "reg_or_0_operand"  "r,J,r,J,r,J,r,J"))))
+       (match_operand:V64BITI 2 "vector_reg_or_const0_operand"        "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+     (match_operand:SI 5 "csr_operand"                                "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand:SI 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vmul.vx\t%0,%3,%4,%1.t
+   vmul.vx\t%0,%3,zero,%1.t
+   vmul.vx\t%0,%3,%4,%1.t
+   vmul.vx\t%0,%3,zero,%1.t
+   vmul.vx\t%0,%3,%4
+   vmul.vx\t%0,%3,zero
+   vmul.vx\t%0,%3,%4
+   vmul.vx\t%0,%3,zero"
+  [(set_attr "type" "vmul")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Signed/Unsigned highpart multiply, returning high bits of product.
+(define_insn "@vmulh<u><mode>_vv"
+  [(set (match_operand:VI 0 "register_operand"              "=vd,vd,vr,vr")
+  (unspec:VI
+    [(unspec:VI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+       (unspec:VI
+        [(match_operand:VI 3 "register_operand"             "vr,vr,vr,vr")
+         (match_operand:VI 4 "register_operand"             "vr,vr,vr,vr")]
+         MUL_HIGHPART)
+       (match_operand:VI 2 "vector_reg_or_const0_operand"   "0,J,0,J")] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vmulh<u>.vv\t%0,%3,%4,%1.t
+   vmulh<u>.vv\t%0,%3,%4,%1.t
+   vmulh<u>.vv\t%0,%3,%4
+   vmulh<u>.vv\t%0,%3,%4"
+  [(set_attr "type" "vmulh")
+   (set_attr "mode" "<MODE>")])
+
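+;; A usage sketch (pre-1.0 rvv-intrinsic naming assumed):
+;;   vint32m1_t hi = vmulh_vv_i32m1 (a, b, vl);
+;; each element receives bits [2*SEW-1:SEW] of the full 2*SEW-wide product.
+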
+;; Vector-Scalar Signed/Unsigned multiply, returning high bits of product.
+(define_insn "@vmulh<u><mode>_vx_internal"
+  [(set (match_operand:VI 0 "register_operand"              "=vd,vd,vd,vd,vr,vr,vr,vr")
+  (unspec:VI
+    [(unspec:VI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+       (unspec:VI
+        [(match_operand:VI 3 "register_operand"             "vr,vr,vr,vr,vr,vr,vr,vr")
+         (match_operand:<VSUB> 4 "reg_or_0_operand"         "r,J,r,J,r,J,r,J")]
+         MUL_HIGHPART)
+       (match_operand:VI 2 "vector_reg_or_const0_operand"   "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vmulh<u>.vx\t%0,%3,%4,%1.t
+   vmulh<u>.vx\t%0,%3,zero,%1.t
+   vmulh<u>.vx\t%0,%3,%4,%1.t
+   vmulh<u>.vx\t%0,%3,zero,%1.t
+   vmulh<u>.vx\t%0,%3,%4
+   vmulh<u>.vx\t%0,%3,zero
+   vmulh<u>.vx\t%0,%3,%4
+   vmulh<u>.vx\t%0,%3,zero"
+  [(set_attr "type" "vmulh")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@vmulh<u><mode>_vx_32bit"
+  [(set (match_operand:V64BITI 0 "register_operand"             "=vd,vd,vd,vd,vr,vr,vr,vr")
+  (unspec:V64BITI
+    [(unspec:V64BITI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand"     "vm,vm,vm,vm,J,J,J,J")
+       (unspec:V64BITI
+        [(match_operand:V64BITI 3 "register_operand"        "vr,vr,vr,vr,vr,vr,vr,vr")
+         (match_operand:SI 4 "reg_or_0_operand"             "r,J,r,J,r,J,r,J")]
+         MUL_HIGHPART)
+       (match_operand:V64BITI 2 "vector_reg_or_const0_operand"  "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+     (match_operand:SI 5 "csr_operand"                          "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand:SI 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vmulh<u>.vx\t%0,%3,%4,%1.t
+   vmulh<u>.vx\t%0,%3,zero,%1.t
+   vmulh<u>.vx\t%0,%3,%4,%1.t
+   vmulh<u>.vx\t%0,%3,zero,%1.t
+   vmulh<u>.vx\t%0,%3,%4
+   vmulh<u>.vx\t%0,%3,zero
+   vmulh<u>.vx\t%0,%3,%4
+   vmulh<u>.vx\t%0,%3,zero"
+  [(set_attr "type" "vmulh")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Signed(vs2)-by-Unsigned(vs1) multiply, returning high bits of product.
+(define_insn "@vmulhsu<mode>_vv"
+  [(set (match_operand:VI 0 "register_operand"                "=vd,vd,vr,vr")
+    (unspec:VI
+      [(unspec:VI
+        [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+         (unspec:VI
+          [(match_operand:VI 3 "register_operand"           "vr,vr,vr,vr")
+           (match_operand:VI 4 "register_operand"           "vr,vr,vr,vr")] UNSPEC_VMULHSU)
+         (match_operand:VI 2 "vector_reg_or_const0_operand"   "0,J,0,J")] UNSPEC_SELECT)
+       (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK")
+       (match_operand 6 "const_int_operand")
+       (reg:SI VL_REGNUM)
+       (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vmulhsu.vv\t%0,%3,%4,%1.t
+   vmulhsu.vv\t%0,%3,%4,%1.t
+   vmulhsu.vv\t%0,%3,%4
+   vmulhsu.vv\t%0,%3,%4"
+  [(set_attr "type" "vmulh")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Signed(vs2)-by-Unsigned(vs1) multiply, returning high bits of product.
+(define_insn "@vmulhsu<mode>_vx_internal"
+  [(set (match_operand:VI 0 "register_operand"              "=vd,vd,vd,vd,vr,vr,vr,vr")
+  (unspec:VI
+    [(unspec:VI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+       (unspec:VI
+        [(match_operand:VI 3 "register_operand"             "vr,vr,vr,vr,vr,vr,vr,vr")
+         (match_operand:<VSUB> 4 "reg_or_0_operand"         "r,J,r,J,r,J,r,J")]
+         UNSPEC_VMULHSU)
+       (match_operand:VI 2 "vector_reg_or_const0_operand"   "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vmulhsu.vx\t%0,%3,%4,%1.t
+   vmulhsu.vx\t%0,%3,zero,%1.t
+   vmulhsu.vx\t%0,%3,%4,%1.t
+   vmulhsu.vx\t%0,%3,zero,%1.t
+   vmulhsu.vx\t%0,%3,%4
+   vmulhsu.vx\t%0,%3,zero
+   vmulhsu.vx\t%0,%3,%4
+   vmulhsu.vx\t%0,%3,zero"
+  [(set_attr "type" "vmulh")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@vmulhsu<mode>_vx_32bit"
+  [(set (match_operand:V64BITI 0 "register_operand"             "=vd,vd,vd,vd,vr,vr,vr,vr")
+  (unspec:V64BITI
+    [(unspec:V64BITI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand"     "vm,vm,vm,vm,J,J,J,J")
+       (unspec:V64BITI
+        [(match_operand:V64BITI 3 "register_operand"        "vr,vr,vr,vr,vr,vr,vr,vr")
+         (match_operand:SI 4 "reg_or_0_operand"             "r,J,r,J,r,J,r,J")]
+         UNSPEC_VMULHSU)
+       (match_operand:V64BITI 2 "vector_reg_or_const0_operand"  "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+     (match_operand:SI 5 "csr_operand"                          "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand:SI 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vmulhsu.vx\t%0,%3,%4,%1.t
+   vmulhsu.vx\t%0,%3,zero,%1.t
+   vmulhsu.vx\t%0,%3,%4,%1.t
+   vmulhsu.vx\t%0,%3,zero,%1.t
+   vmulhsu.vx\t%0,%3,%4
+   vmulhsu.vx\t%0,%3,zero
+   vmulhsu.vx\t%0,%3,%4
+   vmulhsu.vx\t%0,%3,zero"
+  [(set_attr "type" "vmulh")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Signed/Unsigned divide/remainder.
+(define_insn "@v<optab><mode>_vv"
+  [(set (match_operand:VI 0 "register_operand"              "=vd,vd,vr,vr")
+  (unspec:VI
+    [(unspec:VI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+       (any_div:VI
+         (match_operand:VI 3 "register_operand"             "vr,vr,vr,vr")
+         (match_operand:VI 4 "register_operand"             "vr,vr,vr,vr"))
+       (match_operand:VI 2 "vector_reg_or_const0_operand"   "0,J,0,J")] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   v<insn>.vv\t%0,%3,%4,%1.t
+   v<insn>.vv\t%0,%3,%4,%1.t
+   v<insn>.vv\t%0,%3,%4
+   v<insn>.vv\t%0,%3,%4"
+  [(set_attr "type" "vdiv")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Signed/Unsigned divide/remainder.
+(define_insn "@v<optab><mode>_vx_internal"
+  [(set (match_operand:VI 0 "register_operand"              "=vd,vd,vd,vd,vr,vr,vr,vr")
+  (unspec:VI
+    [(unspec:VI
+     [(match_operand:<VM> 1 "vector_reg_or_const0_operand"  "vm,vm,vm,vm,J,J,J,J")
+      (any_div:VI
+       (match_operand:VI 3 "register_operand"               "vr,vr,vr,vr,vr,vr,vr,vr")
+       (vec_duplicate:VI
+         (match_operand:<VSUB> 4 "reg_or_0_operand"         "r,J,r,J,r,J,r,J")))
+      (match_operand:VI 2 "vector_reg_or_const0_operand"    "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   v<insn>.vx\t%0,%3,%4,%1.t
+   v<insn>.vx\t%0,%3,zero,%1.t
+   v<insn>.vx\t%0,%3,%4,%1.t
+   v<insn>.vx\t%0,%3,zero,%1.t
+   v<insn>.vx\t%0,%3,%4
+   v<insn>.vx\t%0,%3,zero
+   v<insn>.vx\t%0,%3,%4
+   v<insn>.vx\t%0,%3,zero"
+  [(set_attr "type" "vdiv")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@v<optab><mode>_vx_32bit"
+  [(set (match_operand:V64BITI 0 "register_operand"                 "=vd,vd,vd,vd,vr,vr,vr,vr")
+  (unspec:V64BITI
+    [(unspec:V64BITI
+     [(match_operand:<VM> 1 "vector_reg_or_const0_operand"          "vm,vm,vm,vm,J,J,J,J")
+      (any_div:V64BITI
+       (match_operand:V64BITI 3 "register_operand"                  "vr,vr,vr,vr,vr,vr,vr,vr")
+       (vec_duplicate:V64BITI
+         (sign_extend:<VSUB> (match_operand:SI 4 "reg_or_0_operand" "r,J,r,J,r,J,r,J"))))
+      (match_operand:V64BITI 2 "vector_reg_or_const0_operand"   "0,0,J,J")] UNSPEC_SELECT)
+     (match_operand:SI 5 "csr_operand"                              "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand:SI 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   v<insn>.vx\t%0,%3,%4,%1.t
+   v<insn>.vx\t%0,%3,zero,%1.t
+   v<insn>.vx\t%0,%3,%4,%1.t
+   v<insn>.vx\t%0,%3,zero,%1.t
+   v<insn>.vx\t%0,%3,%4
+   v<insn>.vx\t%0,%3,zero
+   v<insn>.vx\t%0,%3,%4
+   v<insn>.vx\t%0,%3,zero"
+  [(set_attr "type" "vdiv")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Widening Signed/Unsigned Integer multiply.
+(define_insn "@vwmul<u><vw>_vv"
+  [(set (match_operand:<VW> 0 "register_operand"            "=&vr,&vr,&vr,&vr")
+  (unspec:<VW>
+    [(unspec:<VW>
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+       (mult:<VW>
+        (any_extend:<VW>
+          (match_operand:VWI 3 "register_operand"           "vr,vr,vr,vr"))
+        (any_extend:<VW>
+          (match_operand:VWI 4 "register_operand"           "vr,vr,vr,vr")))
+       (match_operand:<VW> 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vwmul<u>.vv\t%0,%3,%4,%1.t
+   vwmul<u>.vv\t%0,%3,%4,%1.t
+   vwmul<u>.vv\t%0,%3,%4
+   vwmul<u>.vv\t%0,%3,%4"
+  [(set_attr "type" "vwarith")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Widening Signed/Unsigned Integer multiply.
+(define_insn "@vwmul<u><vw>_vx"
+  [(set (match_operand:<VW> 0 "register_operand"            "=&vr,&vr,&vr,&vr,&vr,&vr,&vr,&vr")
+  (unspec:<VW>
+    [(unspec:<VW>
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+       (mult:<VW>
+        (any_extend:<VW>
+          (match_operand:VWI 3 "register_operand"           "vr,vr,vr,vr,vr,vr,vr,vr"))
+        (any_extend:<VW>
+          (vec_duplicate:VWI
+            (match_operand:<VSUB> 4 "reg_or_0_operand"      "r,J,r,J,r,J,r,J"))))
+       (match_operand:<VW> 2 "vector_reg_or_const0_operand" "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vwmul<u>.vx\t%0,%3,%4,%1.t
+   vwmul<u>.vx\t%0,%3,zero,%1.t
+   vwmul<u>.vx\t%0,%3,%4,%1.t
+   vwmul<u>.vx\t%0,%3,zero,%1.t
+   vwmul<u>.vx\t%0,%3,%4
+   vwmul<u>.vx\t%0,%3,zero
+   vwmul<u>.vx\t%0,%3,%4
+   vwmul<u>.vx\t%0,%3,zero"
+  [(set_attr "type" "vwarith")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Widening Signed-Unsigned Integer multiply.
+(define_insn "@vwmulsu<vw>_vv"
+  [(set (match_operand:<VW> 0 "register_operand"            "=&vr,&vr,&vr,&vr")
+  (unspec:<VW>
+    [(unspec:<VW>
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+       (mult:<VW>
+        (sign_extend:<VW>
+          (match_operand:VWI 3 "register_operand"           "vr,vr,vr,vr"))
+        (zero_extend:<VW>
+          (match_operand:VWI 4 "register_operand"           "vr,vr,vr,vr")))
+       (match_operand:<VW> 2 "vector_reg_or_const0_operand" "0,J,0,J")] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vwmulsu.vv\t%0,%3,%4,%1.t
+   vwmulsu.vv\t%0,%3,%4,%1.t
+   vwmulsu.vv\t%0,%3,%4
+   vwmulsu.vv\t%0,%3,%4"
+  [(set_attr "type" "vwmul")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Widening Signed-Unsigned Integer multiply.
+(define_insn "@vwmulsu<vw>_vx"
+  [(set (match_operand:<VW> 0 "register_operand"            "=&vr,&vr,&vr,&vr,&vr,&vr,&vr,&vr")
+  (unspec:<VW>
+    [(unspec:<VW>
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,vm,vm,J,J,J,J")
+       (mult:<VW>
+        (sign_extend:<VW>
+          (match_operand:VWI 3 "register_operand"           "vr,vr,vr,vr,vr,vr,vr,vr"))
+        (zero_extend:<VW>
+          (vec_duplicate:VWI
+           (match_operand:<VSUB> 4 "reg_or_0_operand"       "r,J,r,J,r,J,r,J"))))
+       (match_operand:<VW> 2 "vector_reg_or_const0_operand" "0,0,J,J,0,0,J,J")] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK,rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vwmulsu.vx\t%0,%3,%4,%1.t
+   vwmulsu.vx\t%0,%3,zero,%1.t
+   vwmulsu.vx\t%0,%3,%4,%1.t
+   vwmulsu.vx\t%0,%3,zero,%1.t
+   vwmulsu.vx\t%0,%3,%4
+   vwmulsu.vx\t%0,%3,zero
+   vwmulsu.vx\t%0,%3,%4
+   vwmulsu.vx\t%0,%3,zero"
+  [(set_attr "type" "vwmul")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Single-Width Integer Multiply-Add Instructions.
+(define_insn "@v<imac><mode>_vv"
+  [(set (match_operand:VI 0 "register_operand"              "=vd,vr")
+  (unspec:VI
+    [(unspec:VI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,J")
+       (unspec:VI
+         [(match_operand:VI 2 "register_operand"            "0,0")
+          (match_operand:VI 3 "register_operand"            "vr,vr")
+          (match_operand:VI 4 "register_operand"            "vr,vr")] IMAC)
+       (match_dup 2)] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   v<imac>.vv\t%0,%3,%4,%1.t
+   v<imac>.vv\t%0,%3,%4"
+  [(set_attr "type" "vmadd")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Single-Width Integer Multiply-Add Instructions.
+(define_insn "@v<imac><mode>_vx_internal"
+  [(set (match_operand:VI 0 "register_operand"              "=vd,vd,vr,vr")
+  (unspec:VI
+    [(unspec:VI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+       (unspec:VI
+         [(match_operand:VI 2 "register_operand"            "0,0,0,0")
+          (vec_duplicate:VI
+             (match_operand:<VSUB> 3 "reg_or_0_operand"     "r,J,r,J"))
+          (match_operand:VI 4 "register_operand"            "vr,vr,vr,vr")] IMAC)
+       (match_dup 2)] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   v<imac>.vx\t%0,%3,%4,%1.t
+   v<imac>.vx\t%0,zero,%4,%1.t
+   v<imac>.vx\t%0,%3,%4
+   v<imac>.vx\t%0,zero,%4"
+  [(set_attr "type" "vmadd")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "@v<imac><mode>_vx_32bit"
+  [(set (match_operand:V64BITI 0 "register_operand"                     "=vd,vd,vr,vr")
+  (unspec:V64BITI
+    [(unspec:V64BITI
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand"             "vm,vm,J,J")
+       (unspec:V64BITI
+         [(match_operand:V64BITI 2 "register_operand"                   "0,0,0,0")
+          (vec_duplicate:V64BITI
+             (sign_extend:<VSUB> (match_operand:SI 3 "reg_or_0_operand" "r,J,r,J")))
+          (match_operand:V64BITI 4 "register_operand"                   "vr,vr,vr,vr")] IMAC)
+       (match_dup 2)] UNSPEC_SELECT)
+     (match_operand:SI 5 "csr_operand"                                  "rK,rK,rK,rK")
+     (match_operand:SI 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   v<imac>.vx\t%0,%3,%4,%1.t
+   v<imac>.vx\t%0,zero,%4,%1.t
+   v<imac>.vx\t%0,%3,%4
+   v<imac>.vx\t%0,zero,%4"
+  [(set_attr "type" "vmadd")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Widening signed-integer multiply-add, overwrite addend.
+;; Vector-Vector Widening unsigned-integer multiply-add, overwrite addend.
+(define_insn "@vwmacc<u><vw>_vv"
+  [(set (match_operand:<VW> 0 "register_operand"            "=&vr,&vr")
+  (unspec:<VW>
+    [(unspec:<VW>
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,J")
+       (plus:<VW>
+         (mult:<VW>
+           (any_extend:<VW>
+             (match_operand:VWI 3 "register_operand"        "vr,vr"))
+           (any_extend:<VW>
+             (match_operand:VWI 4 "register_operand"        "vr,vr")))
+         (match_operand:<VW> 2 "register_operand"           "0,0"))
+       (match_dup 2)] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vwmacc<u>.vv\t%0,%3,%4,%1.t
+   vwmacc<u>.vv\t%0,%3,%4"
+  [(set_attr "type" "vwmadd")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Widening signed-integer multiply-add, overwrite addend.
+;; Vector-Scalar Widening unsigned-integer multiply-add, overwrite addend.
+(define_insn "@vwmacc<u><vw>_vx"
+  [(set (match_operand:<VW> 0 "register_operand"            "=&vr,&vr,&vr,&vr")
+  (unspec:<VW>
+    [(unspec:<VW>
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+       (plus:<VW>
+         (mult:<VW>
+           (any_extend:<VW>
+             (vec_duplicate:VWI
+               (match_operand:<VSUB> 3 "reg_or_0_operand"   "r,J,r,J")))
+           (any_extend:<VW>
+             (match_operand:VWI 4 "register_operand"        "vr,vr,vr,vr")))
+         (match_operand:<VW> 2 "register_operand"           "0,0,0,0"))
+       (match_dup 2)] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vwmacc<u>.vx\t%0,%3,%4,%1.t
+   vwmacc<u>.vx\t%0,zero,%4,%1.t
+   vwmacc<u>.vx\t%0,%3,%4
+   vwmacc<u>.vx\t%0,zero,%4"
+  [(set_attr "type" "vwmadd")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Widening signed-unsigned-integer multiply-add, overwrite addend.
+(define_insn "@vwmaccsu<vw>_vv"
+  [(set (match_operand:<VW> 0 "register_operand"            "=&vr,&vr")
+  (unspec:<VW>
+    [(unspec:<VW>
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,J")
+       (plus:<VW>
+         (mult:<VW>
+           (sign_extend:<VW>
+             (match_operand:VWI 3 "register_operand"        "vr,vr"))
+           (zero_extend:<VW>
+             (match_operand:VWI 4 "register_operand"        "vr,vr")))
+         (match_operand:<VW> 2 "register_operand"           "0,0"))
+       (match_dup 2)] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vwmaccsu.vv\t%0,%3,%4,%1.t
+   vwmaccsu.vv\t%0,%3,%4"
+  [(set_attr "type" "vwmadd")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Widening signed-unsigned-integer multiply-add, overwrite addend.
+(define_insn "@vwmaccsu<vw>_vx"
+  [(set (match_operand:<VW> 0 "register_operand"            "=&vr,&vr,&vr,&vr")
+  (unspec:<VW>
+    [(unspec:<VW>
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+       (plus:<VW>
+         (mult:<VW>
+           (sign_extend:<VW>
+             (vec_duplicate:VWI
+               (match_operand:<VSUB> 3 "reg_or_0_operand"   "r,J,r,J")))
+           (zero_extend:<VW>
+             (match_operand:VWI 4 "register_operand"        "vr,vr,vr,vr")))
+         (match_operand:<VW> 2 "register_operand"           "0,0,0,0"))
+       (match_dup 2)] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vwmaccsu.vx\t%0,%3,%4,%1.t
+   vwmaccsu.vx\t%0,zero,%4,%1.t
+   vwmaccsu.vx\t%0,%3,%4
+   vwmaccsu.vx\t%0,zero,%4"
+  [(set_attr "type" "vwmadd")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Widening unsigned-signed-integer multiply-add, overwrite addend.
+(define_insn "@vwmaccus<vw>_vx"
+  [(set (match_operand:<VW> 0 "register_operand"            "=&vr,&vr,&vr,&vr")
+  (unspec:<VW>
+    [(unspec:<VW>
+      [(match_operand:<VM> 1 "vector_reg_or_const0_operand" "vm,vm,J,J")
+       (plus:<VW>
+         (mult:<VW>
+           (zero_extend:<VW>
+             (vec_duplicate:VWI
+               (match_operand:<VSUB> 3 "reg_or_0_operand"   "r,J,r,J")))
+           (sign_extend:<VW>
+             (match_operand:VWI 4 "register_operand"        "vr,vr,vr,vr")))
+         (match_operand:<VW> 2 "register_operand"           "0,0,0,0"))
+       (match_dup 2)] UNSPEC_SELECT)
+     (match_operand 5 "p_reg_or_const_csr_operand"          "rK,rK,rK,rK")
+     (match_operand 6 "const_int_operand")
+     (reg:SI VL_REGNUM)
+     (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+  "TARGET_VECTOR"
+  "@
+   vwmaccus.vx\t%0,%3,%4,%1.t
+   vwmaccus.vx\t%0,zero,%4,%1.t
+   vwmaccus.vx\t%0,%3,%4
+   vwmaccus.vx\t%0,zero,%4"
+  [(set_attr "type" "vwmadd")
+   (set_attr "mode" "<MODE>")])
+
+;; Vector-Vector Integer/Float Merge.
+(define_insn "@vmerge<mode>_vvm"
+  [(set (match_operand:V 0 "register_operand"             "=vd,vd,vd,vd")
+    (unspec:V
+      [(match_operand:V 2 "vector_reg_or_const0_operand"  "0,0,J,J")
+       (unspec:V
+        [(match_operand:<VM> 1 "register_operand"         "vm,vm,vm,vm")
+         (match_operand:V 3 "register_operand"            "vr,vr,vr,vr")
+         (match_operand:V 4 "vector_arith_operand"        "vr,vi,vr,vi")] UNSPEC_MERGE)
+       (match_operand 5 "p_reg_or_const_csr_operand"      "rK,rK,rK,rK")
+       (match_operand 6 "const_int_operand")
+       (reg:SI VL_REGNUM)
+       (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+  vmerge.vvm\t%0,%3,%4,%1
+  vmerge.vim\t%0,%3,%v4,%1
+  vmerge.vvm\t%0,%3,%4,%1
+  vmerge.vim\t%0,%3,%v4,%1"
+ [(set_attr "type" "vmerge")
+  (set_attr "mode" "<MODE>")])
+
+;; Vector-Scalar Integer Merge.
+(define_insn "@vmerge<mode>_vxm_internal"
+  [(set (match_operand:VI 0 "register_operand"              "=vd,vd,vd,vd")
+    (unspec:VI
+      [(match_operand:VI 2 "vector_reg_or_const0_operand"   "0,0,J,J")
+        (unspec:VI
+          [(match_operand:<VM> 1 "register_operand"         "vm,vm,vm,vm")
+           (match_operand:VI 3 "register_operand"           "vr,vr,vr,vr")
+           (vec_duplicate:VI
+             (match_operand:<VSUB> 4 "reg_or_simm5_operand" "r,Ws5,r,Ws5"))] UNSPEC_MERGE)
+       (match_operand 5 "p_reg_or_const_csr_operand"        "rK,rK,rK,rK")
+       (match_operand 6 "const_int_operand")
+       (reg:SI VL_REGNUM)
+       (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+  vmerge.vxm\t%0,%3,%4,%1
+  vmerge.vim\t%0,%3,%4,%1
+  vmerge.vxm\t%0,%3,%4,%1
+  vmerge.vim\t%0,%3,%4,%1"
+ [(set_attr "type" "vmerge")
+  (set_attr "mode" "<MODE>")])
+
+(define_insn "@vmerge<mode>_vxm_32bit"
+  [(set (match_operand:V64BITI 0 "register_operand"             "=vd,vd,vd,vd")
+    (unspec:V64BITI
+      [(match_operand:V64BITI 2 "vector_reg_or_const0_operand"  "0,0,J,J")
+        (unspec:V64BITI
+          [(match_operand:<VM> 1 "register_operand"             "vm,vm,vm,vm")
+           (match_operand:V64BITI 3 "register_operand"          "vr,vr,vr,vr")
+           (vec_duplicate:V64BITI
+             (sign_extend:<VSUB>
+              (match_operand:SI 4 "reg_or_simm5_operand"        "r,Ws5,r,Ws5")))] UNSPEC_MERGE)
+       (match_operand:SI 5 "csr_operand"                        "rK,rK,rK,rK")
+       (match_operand:SI 6 "const_int_operand")
+       (reg:SI VL_REGNUM)
+       (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "@
+  vmerge.vxm\t%0,%3,%4,%1
+  vmerge.vim\t%0,%3,%4,%1
+  vmerge.vxm\t%0,%3,%4,%1
+  vmerge.vim\t%0,%3,%4,%1"
+ [(set_attr "type" "vmerge")
+  (set_attr "mode" "<MODE>")])
 
+;; Vector-Vector Integer/Float Move.
+(define_insn "@vmv<mode>_v_v"
+  [(set (match_operand:V 0 "register_operand"             "=vr,vr")
+    (unspec:V
+      [(match_operand:V 1 "vector_reg_or_const0_operand"  "0,J")
+       (unspec:V
+        [(match_operand:V 2 "register_operand"            "vr,vr")] UNSPEC_MOVE)
+       (match_operand 3 "p_reg_or_const_csr_operand"      "rK,rK")
+       (match_operand 4 "const_int_operand")
+       (reg:SI VL_REGNUM)
+       (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
+ "TARGET_VECTOR"
+ "vmv.v.v\t%0,%2"
+ [(set_attr "type" "vmove")
+  (set_attr "mode" "<MODE>")])
+
 ;; Vector-Scalar Integer Move.
 (define_insn "@vmv<mode>_v_x_internal"
   [(set (match_operand:VI 0 "register_operand"          "=vr,vr,vr,vr")
@@ -1055,29 +3577,6 @@ 
  [(set_attr "type" "vmove")
   (set_attr "mode" "<MODE>")])
 
-;; Vector-Scalar integer merge.
-(define_insn "@vmerge<mode>_vxm_internal"
-  [(set (match_operand:VI 0 "register_operand"              "=vd,vd,vd,vd")
-    (unspec:VI
-      [(match_operand:VI 2 "vector_reg_or_const0_operand"   "0,0,J,J")
-        (unspec:VI
-          [(match_operand:<VM> 1 "register_operand"         "vm,vm,vm,vm")
-           (match_operand:VI 3 "register_operand"           "vr,vr,vr,vr")
-           (vec_duplicate:VI
-             (match_operand:<VSUB> 4 "reg_or_simm5_operand" "r,Ws5,r,Ws5"))] UNSPEC_MERGE)
-       (match_operand 5 "p_reg_or_const_csr_operand"        "rK,rK,rK,rK")
-       (match_operand 6 "const_int_operand")
-       (reg:SI VL_REGNUM)
-       (reg:SI VTYPE_REGNUM)] UNSPEC_RVV))]
- "TARGET_VECTOR"
- "@
-  vmerge.vxm\t%0,%3,%4,%1
-  vmerge.vim\t%0,%3,%4,%1
-  vmerge.vxm\t%0,%3,%4,%1
-  vmerge.vim\t%0,%3,%4,%1"
- [(set_attr "type" "vmerge")
-  (set_attr "mode" "<MODE>")])
-
 ;; vmclr.m vd -> vmxor.mm vd,vd,vd # Clear mask register
 (define_insn "@vmclr<mode>_m"
   [(set (match_operand:VB 0 "register_operand"        "=vr")