[3/7] RISC-V: Add intrinsic functions for crypto vector Zvkg extension

Message ID 20231204025709.3783-3-wangfeng@eswincomputing.com
State Deferred
Delegated to: Jeff Law

Checks

Context                                    Check    Description
rivoscibot/toolchain-ci-rivos-lint         warning  Lint failed
rivoscibot/toolchain-ci-rivos-apply-patch  fail     Patch failed to apply

Commit Message

Feng Wang Dec. 4, 2023, 2:57 a.m. UTC
  This patch adds the intrinsic functions (according to
https://github.com/riscv-non-isa/rvv-intrinsic-doc/blob/eopc/vector-crypto/auto-generated/vector-crypto/intrinsic_funcs.md)
for the crypto vector Zvkg extension.  All corresponding test cases are added
for API testing.
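
For reference, a minimal usage sketch of the new intrinsics (the intrinsic
names are taken from the test cases in this patch; ghash_step/ghash_mul are
placeholder names, and the instruction comments follow the md patterns added
below):

  /* Compile with e.g. -march=rv64gc_zvkg -mabi=lp64d, as in the tests.  */
  #include <riscv_vector.h>

  /* GHASH add-multiply step: vd is both the accumulator input and the
     destination.  */
  vuint32m1_t ghash_step (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1,
                          size_t vl)
  {
    return __riscv_vghsh_vv_u32m1 (vd, vs2, vs1, vl); /* vghsh.vv */
  }

  /* GHASH multiply: reads only vd and vs2.  */
  vuint32m1_t ghash_mul (vuint32m1_t vd, vuint32m1_t vs2, size_t vl)
  {
    return __riscv_vgmul_vv_u32m1 (vd, vs2, vl); /* vgmul.vv */
  }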

Co-authored-by: Songhe Zhu <zhusonghe@eswincomputing.com>
Co-authored-by: Ciyan Pan <panciyan@eswincomputing.com>

gcc/ChangeLog:

        * common/config/riscv/riscv-common.cc: Add zvkg to riscv_implied_info.
        * config/riscv/riscv-vector-builtins-bases.cc (class vghsh): Add new function_base for Zvkg.
        (class vgmul): Ditto.
        (BASE): Add Zvkg BASE declaration.
        * config/riscv/riscv-vector-builtins-bases.h: Ditto.
        * config/riscv/riscv-vector-builtins-shapes.cc (struct crypto_vv_def): Add function_builder for Zvkg.
        (SHAPE): Add Zvkg SHAPE declaration.
        * config/riscv/riscv-vector-builtins-shapes.h: Ditto.
        * config/riscv/riscv-vector-builtins.cc: Define new data structs for Zvkg.
        * config/riscv/riscv-vector-crypto-builtins-avail.h (AVAIL): Add zvkg enable condition.
        * config/riscv/riscv-vector-crypto-builtins-functions.def (vghsh): Add intrinsic def.
        (vgmul): Ditto.
        * config/riscv/riscv.md: Add Zvkg insn type names.
        * config/riscv/vector-crypto.md (@pred_vghsh<VSI:mode>): Add Zvkg md patterns.
        (@pred_vgmul<VSI:mode>): Ditto.
        * config/riscv/vector-iterators.md: Add new iterators for Zvkg.
        * config/riscv/vector.md: Add the corresponding attributes for Zvkg.

gcc/testsuite/ChangeLog:

        * gcc.target/riscv/zvk/zvk.exp: Add the zvkg test directory to the runtest list.
        * gcc.target/riscv/zvk/zvkg/vghsh.c: New test.
        * gcc.target/riscv/zvk/zvkg/vghsh_overloaded.c: New test.
        * gcc.target/riscv/zvk/zvkg/vgmul.c: New test.
        * gcc.target/riscv/zvk/zvkg/vgmul_overloaded.c: New test.
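
Assuming the usual GCC testsuite flow, the new tests can be exercised through
the existing zvk harness with something like:

  make check-gcc RUNTESTFLAGS="zvk.exp"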
---
 gcc/common/config/riscv/riscv-common.cc       |  1 +
 .../riscv/riscv-vector-builtins-bases.cc      | 29 +++++++++++
 .../riscv/riscv-vector-builtins-bases.h       |  2 +
 .../riscv/riscv-vector-builtins-shapes.cc     | 23 +++++++++
 .../riscv/riscv-vector-builtins-shapes.h      |  1 +
 gcc/config/riscv/riscv-vector-builtins.cc     | 15 ++++++
 .../riscv-vector-crypto-builtins-avail.h      |  1 +
 ...riscv-vector-crypto-builtins-functions.def |  3 ++
 gcc/config/riscv/riscv.md                     |  4 +-
 gcc/config/riscv/vector-crypto.md             | 43 +++++++++++++++-
 gcc/config/riscv/vector-iterators.md          |  4 ++
 gcc/config/riscv/vector.md                    | 19 +++----
 gcc/testsuite/gcc.target/riscv/zvk/zvk.exp    |  2 +
 .../gcc.target/riscv/zvk/zvkg/vghsh.c         | 51 +++++++++++++++++++
 .../riscv/zvk/zvkg/vghsh_overloaded.c         | 51 +++++++++++++++++++
 .../gcc.target/riscv/zvk/zvkg/vgmul.c         | 51 +++++++++++++++++++
 .../riscv/zvk/zvkg/vgmul_overloaded.c         | 51 +++++++++++++++++++
 17 files changed, 340 insertions(+), 11 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvkg/vghsh.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvkg/vghsh_overloaded.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvkg/vgmul.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/zvk/zvkg/vgmul_overloaded.c
  

Patch

diff --git a/gcc/common/config/riscv/riscv-common.cc b/gcc/common/config/riscv/riscv-common.cc
index 296500e15df..3eefd0263f9 100644
--- a/gcc/common/config/riscv/riscv-common.cc
+++ b/gcc/common/config/riscv/riscv-common.cc
@@ -123,6 +123,7 @@  static const riscv_implied_info_t riscv_implied_info[] =
   {"zvbb",  "zvkb"},
   {"zvbc",     "v"},
   {"zvkb",     "v"},
+  {"zvkg",     "v"},
 
   {"zfh", "zfhmin"},
   {"zfhmin", "f"},
diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.cc b/gcc/config/riscv/riscv-vector-builtins-bases.cc
index 45b1e563ff4..0cb9b2925af 100644
--- a/gcc/config/riscv/riscv-vector-builtins-bases.cc
+++ b/gcc/config/riscv/riscv-vector-builtins-bases.cc
@@ -2227,6 +2227,31 @@  public:
   }
 };
 
+class vghsh : public function_base
+{
+public:
+  bool apply_mask_policy_p () const override { return false; }
+  bool use_mask_predication_p () const override { return false; }
+  bool has_merge_operand_p () const override { return false; }
+  rtx expand (function_expander &e) const override
+  {
+    return e.use_exact_insn (code_for_pred_vghsh (e.vector_mode ()));
+  }
+};
+
+
+class vgmul : public function_base
+{
+public:
+  bool apply_mask_policy_p () const override { return false; }
+  bool use_mask_predication_p () const override { return false; }
+  bool has_merge_operand_p () const override { return false; }
+  rtx expand (function_expander &e) const override
+  {
+    return e.use_exact_insn (code_for_pred_vgmul (e.vector_mode ()));
+  }
+};
+
 static CONSTEXPR const vsetvl<false> vsetvl_obj;
 static CONSTEXPR const vsetvl<true> vsetvlmax_obj;
 static CONSTEXPR const loadstore<false, LST_UNIT_STRIDE, false> vle_obj;
@@ -2496,6 +2521,8 @@  static CONSTEXPR const vcltz<UNSPEC_VCTZ>        vctz_obj;
 static CONSTEXPR const vwsll vwsll_obj;
 static CONSTEXPR const clmul<UNSPEC_VCLMUL>      vclmul_obj;
 static CONSTEXPR const clmul<UNSPEC_VCLMULH>     vclmulh_obj;
+static CONSTEXPR const vghsh vghsh_obj;
+static CONSTEXPR const vgmul vgmul_obj;
 
 /* Declare the function base NAME, pointing it to an instance
    of class <NAME>_obj.  */
@@ -2770,4 +2797,6 @@  BASE (vror)
 BASE (vwsll)
 BASE (vclmul)
 BASE (vclmulh)
+BASE (vghsh)
+BASE (vgmul)
 } // end namespace riscv_vector
diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.h b/gcc/config/riscv/riscv-vector-builtins-bases.h
index 7d2c86f9162..6a389113e1f 100644
--- a/gcc/config/riscv/riscv-vector-builtins-bases.h
+++ b/gcc/config/riscv/riscv-vector-builtins-bases.h
@@ -292,6 +292,8 @@  extern const function_base *const vror;
 extern const function_base *const vwsll;
 extern const function_base *const vclmul;
 extern const function_base *const vclmulh;
+extern const function_base *const vghsh;
+extern const function_base *const vgmul;
 }
 
 } // end namespace riscv_vector
diff --git a/gcc/config/riscv/riscv-vector-builtins-shapes.cc b/gcc/config/riscv/riscv-vector-builtins-shapes.cc
index f21c459e6a2..dd62d8b11b6 100644
--- a/gcc/config/riscv/riscv-vector-builtins-shapes.cc
+++ b/gcc/config/riscv/riscv-vector-builtins-shapes.cc
@@ -1009,6 +1009,28 @@  struct zvbb_zvbc_def : public build_base
   }
 };
 
+/* vghsh/vgmul class.  */
+struct crypto_vv_def : public build_base
+{
+  char *get_name (function_builder &b, const function_instance &instance,
+                  bool overloaded_p) const override
+  {
+    /* Return nullptr if it can not be overloaded.  */
+    if (overloaded_p && !instance.base->can_be_overloaded_p (instance.pred))
+      return nullptr;
+    b.append_base_name (instance.base_name);
+
+    if (!overloaded_p)
+      {
+        b.append_name (operand_suffixes[instance.op_info->op]);
+        b.append_name (type_suffixes[instance.type.index].vector);
+      }
+
+    b.append_name (predication_suffixes[instance.pred]);
+    return b.finish_name ();
+  }
+};
+
 SHAPE(vsetvl, vsetvl)
 SHAPE(vsetvl, vsetvlmax)
 SHAPE(loadstore, loadstore)
@@ -1038,4 +1060,5 @@  SHAPE(seg_loadstore, seg_loadstore)
 SHAPE(seg_indexed_loadstore, seg_indexed_loadstore)
 SHAPE(seg_fault_load, seg_fault_load)
 SHAPE(zvbb_zvbc, zvbb_zvbc)
+SHAPE(crypto_vv, crypto_vv)
 } // end namespace riscv_vector
diff --git a/gcc/config/riscv/riscv-vector-builtins-shapes.h b/gcc/config/riscv/riscv-vector-builtins-shapes.h
index a217eae33f0..37b7077a3b1 100644
--- a/gcc/config/riscv/riscv-vector-builtins-shapes.h
+++ b/gcc/config/riscv/riscv-vector-builtins-shapes.h
@@ -54,6 +54,7 @@  extern const function_shape *const seg_indexed_loadstore;
 extern const function_shape *const seg_fault_load;
 /* Below function_shape are Vectro Crypto*/
 extern const function_shape *const zvbb_zvbc;
+extern const function_shape *const crypto_vv;
 }
 
 } // end namespace riscv_vector
diff --git a/gcc/config/riscv/riscv-vector-builtins.cc b/gcc/config/riscv/riscv-vector-builtins.cc
index ffd30c1a806..eaefb0f18cc 100644
--- a/gcc/config/riscv/riscv-vector-builtins.cc
+++ b/gcc/config/riscv/riscv-vector-builtins.cc
@@ -2639,6 +2639,21 @@  static CONSTEXPR const rvv_op_info all_v_vcreate_lmul4_x2_ops
      rvv_arg_type_info (RVV_BASE_vlmul_ext_x2), /* Return type */
      ext_vcreate_args /* Args */};
 
+/* A static operand information for vector_type func (vector_type)
+   function registration.  Some instructions only support SEW=32, such as
+   the crypto vector Zvkg extension.  */
+static CONSTEXPR const rvv_op_info u_vvv_crypto_sew32_ops
+  = {crypto_sew32_ops,			   /* Types */
+     OP_TYPE_vv,					   /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     vv_args /* Args */};
+
+static CONSTEXPR const rvv_op_info u_vvvv_crypto_sew32_ops
+  = {crypto_sew32_ops,			   /* Types */
+     OP_TYPE_vv,					   /* Suffix */
+     rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+     vvv_args /* Args */};
+
 /* A static operand information for vector_type func (vector_type).
    Some ins just supports SEW=64, such as crypto vectol Zvbc extension
    vclmul.vv, vclmul.vx.
diff --git a/gcc/config/riscv/riscv-vector-crypto-builtins-avail.h b/gcc/config/riscv/riscv-vector-crypto-builtins-avail.h
index a63dea6a27b..fb1f195bf9b 100755
--- a/gcc/config/riscv/riscv-vector-crypto-builtins-avail.h
+++ b/gcc/config/riscv/riscv-vector-crypto-builtins-avail.h
@@ -15,5 +15,6 @@  namespace riscv_vector {
 AVAIL (zvbb, TARGET_ZVBB)
 AVAIL (zvbc, TARGET_ZVBC)
 AVAIL (zvkb_or_zvbb, TARGET_ZVKB || TARGET_ZVBB)
+AVAIL (zvkg, TARGET_ZVKG)
 }
 #endif
diff --git a/gcc/config/riscv/riscv-vector-crypto-builtins-functions.def b/gcc/config/riscv/riscv-vector-crypto-builtins-functions.def
index d8c74dec4f6..c2ed9353e24 100755
--- a/gcc/config/riscv/riscv-vector-crypto-builtins-functions.def
+++ b/gcc/config/riscv/riscv-vector-crypto-builtins-functions.def
@@ -22,3 +22,6 @@  DEF_VECTOR_CRYPTO_FUNCTION (vclmul,  zvbb_zvbc, full_preds, u_vvv_crypto_sew64_o
 DEF_VECTOR_CRYPTO_FUNCTION (vclmul,  zvbb_zvbc, full_preds, u_vvx_crypto_sew64_ops, zvbc)
 DEF_VECTOR_CRYPTO_FUNCTION (vclmulh, zvbb_zvbc, full_preds, u_vvv_crypto_sew64_ops, zvbc)
 DEF_VECTOR_CRYPTO_FUNCTION (vclmulh, zvbb_zvbc, full_preds, u_vvx_crypto_sew64_ops, zvbc)
+// ZVKG
+DEF_VECTOR_CRYPTO_FUNCTION (vghsh, crypto_vv, none_tu_preds, u_vvvv_crypto_sew32_ops, zvkg)
+DEF_VECTOR_CRYPTO_FUNCTION (vgmul, crypto_vv, none_tu_preds, u_vvv_crypto_sew32_ops,  zvkg)
\ No newline at end of file
diff --git a/gcc/config/riscv/riscv.md b/gcc/config/riscv/riscv.md
index 4a853d8238f..1ead762e552 100644
--- a/gcc/config/riscv/riscv.md
+++ b/gcc/config/riscv/riscv.md
@@ -439,6 +439,8 @@ 
 ;; vwsll        crypto vector widening shift left logical instructions
 ;; vclmul       vector crypto carry-less multiply - return low half instructions
 ;; vclmulh      vector crypto carry-less multiply - return high half instructions
+;; vghsh        vector crypto add-multiply over GHASH Galois-Field instructions
+;; vgmul        vector crypto multiply over GHASH Galois-Field instructions
 (define_attr "type"
   "unknown,branch,jump,jalr,ret,call,load,fpload,store,fpstore,
    mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
@@ -459,7 +461,7 @@ 
    vmalu,vmpop,vmffs,vmsfs,vmiota,vmidx,vimovvx,vimovxv,vfmovvf,vfmovfv,
    vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,
    vgather,vcompress,vmov,vector,vandn,vbrev,vbrev8,vrev8,vclz,vctz,vrol,vror,vwsll,
-   vclmul,vclmulh"
+   vclmul,vclmulh,vghsh,vgmul"
   (cond [(eq_attr "got" "load") (const_string "load")
 
 	 ;; If a doubleword move uses these expensive instructions,
diff --git a/gcc/config/riscv/vector-crypto.md b/gcc/config/riscv/vector-crypto.md
index f3034ba122a..edc7dc9d432 100755
--- a/gcc/config/riscv/vector-crypto.md
+++ b/gcc/config/riscv/vector-crypto.md
@@ -11,6 +11,8 @@ 
     UNSPEC_VWSLL
     UNSPEC_VCLMUL
     UNSPEC_VCLMULH
+    UNSPEC_VGHSH
+    UNSPEC_VGMUL
 ])
 
 (define_int_attr ror_rol [(UNSPEC_VROL "rol") (UNSPEC_VROR "ror")])
@@ -254,4 +256,43 @@ 
   "TARGET_ZVBC && TARGET_64BIT"
   "vclmul<h>.vx\t%0,%3,%4%p1"
   [(set_attr "type" "vclmul<h>")
-   (set_attr "mode" "<VDI:MODE>")])
\ No newline at end of file
+   (set_attr "mode" "<VDI:MODE>")])
+
+;; zvkg instructions patterns.
+;; vghsh.vv  vgmul.vv
+(define_insn "@pred_vghsh<VSI:mode>"
+  [(set (match_operand:VSI 0 "register_operand"           "=vd")
+        (if_then_else:VSI
+          (unspec:<VSI:VM>
+            [(match_operand 4 "vector_length_operand"     "rK")
+             (match_operand 5 "const_int_operand"         " i")
+             (match_operand 6 "const_int_operand"         " i")
+             (reg:SI VL_REGNUM)
+             (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+          (unspec:VSI
+             [(match_operand:VSI 1 "register_operand" " 0")
+              (match_operand:VSI 2 "register_operand" "vr")
+              (match_operand:VSI 3 "register_operand" "vr")] UNSPEC_VGHSH)
+          (match_dup 1)))]
+  "TARGET_ZVKG"
+  "vghsh.vv\t%0,%2,%3"
+  [(set_attr "type" "vghsh")
+   (set_attr "mode" "<VSI:MODE>")])
+
+(define_insn "@pred_vgmul<VSI:mode>"
+  [(set (match_operand:VSI 0 "register_operand"           "=vd")
+        (if_then_else:VSI
+          (unspec:<VSI:VM>
+            [(match_operand 3 "vector_length_operand"     "rK")
+             (match_operand 4 "const_int_operand"         " i")
+             (match_operand 5 "const_int_operand"         " i")
+             (reg:SI VL_REGNUM)
+             (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+          (unspec:VSI
+             [(match_operand:VSI 1 "register_operand" " 0")
+              (match_operand:VSI 2 "register_operand" "vr")] UNSPEC_VGMUL)
+          (match_dup 1)))]
+  "TARGET_ZVKG"
+  "vgmul.vv\t%0,%2"
+  [(set_attr "type" "vgmul")
+   (set_attr "mode" "<VSI:MODE>")])
diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md
index e52709493f6..fea84a3f54c 100644
--- a/gcc/config/riscv/vector-iterators.md
+++ b/gcc/config/riscv/vector-iterators.md
@@ -3917,6 +3917,10 @@ 
   (V2048BI "riscv_vector::vls_mode_valid_p (V2048BImode) && TARGET_MIN_VLEN >= 2048")
   (V4096BI "riscv_vector::vls_mode_valid_p (V4096BImode) && TARGET_MIN_VLEN >= 4096")])
 
+(define_mode_iterator VSI [
+  RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32")
+])
+
 (define_mode_iterator VDI [
   (RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64")
   (RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64")
diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md
index 2733ea7728f..aa529d6378f 100644
--- a/gcc/config/riscv/vector.md
+++ b/gcc/config/riscv/vector.md
@@ -53,7 +53,7 @@ 
 			  vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\
 			  vgather,vcompress,vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,\
 			  vssegtux,vssegtox,vlsegdff,vandn,vbrev,vbrev8,vrev8,vclz,vctz,vrol,\
-                          vror,vwsll,vclmul,vclmulh")
+                          vror,vwsll,vclmul,vclmulh,vghsh,vgmul")
 	 (const_string "true")]
 	(const_string "false")))
 
@@ -76,7 +76,7 @@ 
 			  vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\
 			  vgather,vcompress,vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,\
 			  vssegtux,vssegtox,vlsegdff,vandn,vbrev,vbrev8,vrev8,vclz,vctz,vrol,\
-                          vror,vwsll,vclmul,vclmulh")
+                          vror,vwsll,vclmul,vclmulh,vghsh,vgmul")
 	 (const_string "true")]
 	(const_string "false")))
 
@@ -704,7 +704,7 @@ 
                                 vandn,vbrev,vbrev8,vrev8,vclz,vctz,vrol,vror,vwsll,vclmul,vclmulh")
 	       (const_int 2)
 
-	       (eq_attr "type" "vimerge,vfmerge,vcompress")
+	       (eq_attr "type" "vimerge,vfmerge,vcompress,vghsh,vgmul")
 	       (const_int 1)
 
 	       (eq_attr "type" "vimuladd,vfmuladd")
@@ -743,7 +743,8 @@ 
 			  vstox,vext,vmsfs,vmiota,vfsqrt,vfrecp,vfcvtitof,vldff,\
 			  vfcvtftoi,vfwcvtitof,vfwcvtftoi,vfwcvtftof,vfncvtitof,\
 			  vfncvtftoi,vfncvtftof,vfclass,vimovxv,vfmovfv,vcompress,\
-			  vlsegde,vssegts,vssegtux,vssegtox,vlsegdff,vbrev,vbrev8,vrev8")
+			  vlsegde,vssegts,vssegtux,vssegtox,vlsegdff,vbrev,vbrev8,vrev8,\
+                          vghsh")
 	   (const_int 4)
 
 	 ;; If operands[3] of "vlds" is not vector mode, it is pred_broadcast.
@@ -765,7 +766,7 @@ 
 	 (eq_attr "type" "vicmp,vimuladd,vfcmp,vfmuladd")
 	   (const_int 6)
 
-	 (eq_attr "type" "vmpop,vmffs,vmidx,vssegte,vclz,vctz")
+	 (eq_attr "type" "vmpop,vmffs,vmidx,vssegte,vclz,vctz,vgmul")
 	   (const_int 3)]
   (const_int INVALID_ATTRIBUTE)))
 
@@ -774,7 +775,7 @@ 
   (cond [(eq_attr "type" "vlde,vimov,vfmov,vext,vmiota,vfsqrt,vfrecp,\
 			  vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi,vfwcvtftof,\
 			  vfncvtitof,vfncvtftoi,vfncvtftof,vfclass,vimovxv,vfmovfv,\
-			  vcompress,vldff,vlsegde,vlsegdff,vbrev,vbrev8,vrev8")
+			  vcompress,vldff,vlsegde,vlsegdff,vbrev,vbrev8,vrev8,vghsh")
 	   (symbol_ref "riscv_vector::get_ta(operands[5])")
 
 	 ;; If operands[3] of "vlds" is not vector mode, it is pred_broadcast.
@@ -796,7 +797,7 @@ 
 	 (eq_attr "type" "vimuladd,vfmuladd")
 	   (symbol_ref "riscv_vector::get_ta(operands[7])")
 
-	 (eq_attr "type" "vmidx")
+	 (eq_attr "type" "vmidx,vgmul")
 	   (symbol_ref "riscv_vector::get_ta(operands[4])")]
 	(const_int INVALID_ATTRIBUTE)))
 
@@ -838,7 +839,7 @@ 
 			  vfclass,vired,viwred,vfredu,vfredo,vfwredu,vfwredo,\
 			  vimovxv,vfmovfv,vlsegde,vlsegdff,vbrev,vbrev8,vrev8")
 	   (const_int 7)
-	 (eq_attr "type" "vldm,vstm,vmalu,vmalu")
+	 (eq_attr "type" "vldm,vstm,vmalu,vmalu,vgmul")
 	   (const_int 5)
 
 	 ;; If operands[3] of "vlds" is not vector mode, it is pred_broadcast.
@@ -861,7 +862,7 @@ 
 	 (eq_attr "type" "vimuladd,vfmuladd")
 	   (const_int 9)
 
-	 (eq_attr "type" "vmsfs,vmidx,vcompress")
+	 (eq_attr "type" "vmsfs,vmidx,vcompress,vghsh")
 	   (const_int 6)
 
 	 (eq_attr "type" "vmpop,vmffs,vssegte,vclz,vctz")
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvk.exp b/gcc/testsuite/gcc.target/riscv/zvk/zvk.exp
index 2426825baae..c1b9eede6ba 100644
--- a/gcc/testsuite/gcc.target/riscv/zvk/zvk.exp
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvk.exp
@@ -38,6 +38,8 @@  dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/zvbb/*.\[cS\]]] \
         "" $DEFAULT_CFLAGS
 dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/zvbc/*.\[cS\]]] \
         "" $DEFAULT_CFLAGS
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/zvkg/*.\[cS\]]] \
+        "" $DEFAULT_CFLAGS
 
 # All done.
 dg-finish
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vghsh.c b/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vghsh.c
new file mode 100644
index 00000000000..3837f99fea3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vghsh.c
@@ -0,0 +1,51 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkg -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32mf2(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32m1(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32m2(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32m4(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32m8(vd, vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32mf2_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32m1_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32m2_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32m4_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32m8_tu(vd, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vghsh\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vghsh_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vghsh_overloaded.c
new file mode 100644
index 00000000000..2d2004bc653
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vghsh_overloaded.c
@@ -0,0 +1,51 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkg -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vghsh(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vghsh(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vghsh(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vghsh(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vghsh(vd, vs2, vs1, vl);
+}
+
+vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vghsh_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vghsh_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vghsh_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vghsh_tu(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vghsh_tu(vd, vs2, vs1, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vghsh\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vgmul.c b/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vgmul.c
new file mode 100644
index 00000000000..902de106c12
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vgmul.c
@@ -0,0 +1,51 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkg -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32mf2(vd, vs2, vl);
+}
+
+vuint32m1_t test_vgmul_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32m1(vd, vs2, vl);
+}
+
+vuint32m2_t test_vgmul_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32m8(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32mf2_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vgmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32m1_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vgmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32m8_tu(vd, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vgmul\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vgmul_overloaded.c b/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vgmul_overloaded.c
new file mode 100644
index 00000000000..53397ebc69b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zvk/zvkg/vgmul_overloaded.c
@@ -0,0 +1,51 @@ 
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zvkg -mabi=lp64d -O2 -Wno-psabi" } */
+#include <stdint.h>
+#include <riscv_vector.h>
+
+typedef _Float16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vgmul(vd, vs2, vl);
+}
+
+vuint32m1_t test_vgmul_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vgmul(vd, vs2, vl);
+}
+
+vuint32m2_t test_vgmul_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vgmul(vd, vs2, vl);
+}
+
+vuint32m4_t test_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vgmul(vd, vs2, vl);
+}
+
+vuint32m8_t test_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vgmul(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vgmul_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vgmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vgmul_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vgmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vgmul_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vgmul_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vgmul_tu(vd, vs2, vl);
+}
+
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*ta,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s*zero,\s*[a-x0-9]+,\s*[a-x0-9]+,m[a-x0-9]+,\s*tu,\s*ma} 5 } } */
+/* { dg-final { scan-assembler-times {vgmul\.vv\s+v[0-9]+,\s*v[0-9]} 10 } } */