[v1] Vect: Support IFN SAT_TRUNC for unsigned vector int
Checks

Context                                        | Check   | Description
-----------------------------------------------+---------+-------------
linaro-tcwg-bot/tcwg_gcc_build--master-arm     | success | Build passed
linaro-tcwg-bot/tcwg_gcc_check--master-arm     | success | Test passed
linaro-tcwg-bot/tcwg_gcc_build--master-aarch64 | success | Build passed
linaro-tcwg-bot/tcwg_gcc_check--master-aarch64 | success | Test passed
Commit Message
From: Pan Li <pan2.li@intel.com>
This patch would like to support the .SAT_TRUNC internal function for
unsigned vector integer types. Given the example code below:
Form 1
#define VEC_DEF_SAT_U_TRUC_FMT_1(NT, WT)                               \
void __attribute__((noinline))                                         \
vec_sat_u_truc_##WT##_to_##NT##_fmt_1 (NT *x, WT *y, unsigned limit)   \
{                                                                      \
  for (unsigned i = 0; i < limit; i++)                                 \
    {                                                                  \
      bool overflow = y[i] > (WT)(NT)(-1);                             \
      x[i] = ((NT)y[i]) | (NT)-overflow;                               \
    }                                                                  \
}
VEC_DEF_SAT_U_TRUC_FMT_1 (uint32_t, uint64_t)
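For reference, below is a minimal scalar sketch of the saturating-truncate
semantics the idiom above encodes (the helper name and test values are
illustrative only, not part of the patch):

  #include <stdint.h>
  #include <stdio.h>

  /* Unsigned saturating truncation, uint64_t -> uint32_t: values that
     fit in uint32_t pass through unchanged, larger values clamp to
     UINT32_MAX.  */
  static uint32_t
  sat_trunc_u64_to_u32 (uint64_t v)
  {
    _Bool overflow = v > (uint64_t) (uint32_t) -1;  /* v > 4294967295?  */
    return ((uint32_t) v) | (uint32_t) -overflow;   /* all-ones on overflow  */
  }

  int
  main (void)
  {
    printf ("%u\n", sat_trunc_u64_to_u32 (42));          /* prints 42  */
    printf ("%u\n", sat_trunc_u64_to_u32 (1ULL << 32));  /* prints 4294967295  */
    return 0;
  }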
Before this patch:
void vec_sat_u_truc_uint64_t_to_uint32_t_fmt_1 (uint32_t * x, uint64_t * y, unsigned int limit)
{
  ...
  _51 = .SELECT_VL (ivtmp_49, POLY_INT_CST [2, 2]);
  ivtmp_35 = _51 * 8;
  vect__4.7_32 = .MASK_LEN_LOAD (vectp_y.5_34, 64B, { -1, ... }, _51, 0);
  mask_overflow_16.8_30 = vect__4.7_32 > { 4294967295, ... };
  vect__5.9_29 = (vector([2,2]) unsigned int) vect__4.7_32;
  vect__10.13_20 = .VCOND_MASK (mask_overflow_16.8_30, { 4294967295, ... }, vect__5.9_29);
  ivtmp_12 = _51 * 4;
  .MASK_LEN_STORE (vectp_x.14_11, 32B, { -1, ... }, _51, 0, vect__10.13_20);
  vectp_y.5_33 = vectp_y.5_34 + ivtmp_35;
  vectp_x.14_46 = vectp_x.14_11 + ivtmp_12;
  ivtmp_50 = ivtmp_49 - _51;
  if (ivtmp_50 != 0)
    ...
}
After this patch:
void vec_sat_u_truc_uint64_t_to_uint32_t_fmt_1 (uint32_t * x, uint64_t * y, unsigned int limit)
{
  ...
  _12 = .SELECT_VL (ivtmp_21, POLY_INT_CST [2, 2]);
  ivtmp_34 = _12 * 8;
  vect__4.7_31 = .MASK_LEN_LOAD (vectp_y.5_33, 64B, { -1, ... }, _12, 0);
  vect_patt_40.8_30 = .SAT_TRUNC (vect__4.7_31); // << .SAT_TRUNC
  ivtmp_29 = _12 * 4;
  .MASK_LEN_STORE (vectp_x.9_28, 32B, { -1, ... }, _12, 0, vect_patt_40.8_30);
  vectp_y.5_32 = vectp_y.5_33 + ivtmp_34;
  vectp_x.9_27 = vectp_x.9_28 + ivtmp_29;
  ivtmp_20 = ivtmp_21 - _12;
  if (ivtmp_20 != 0)
    ...
}
The test suites below passed for this patch:
* The x86 bootstrap test.
* The x86 fully regression test.
* The rv64gcv fully regression tests.
gcc/ChangeLog:
* tree-vect-patterns.cc (gimple_unsigned_integer_sat_trunc): Add
new decl generated by match.
(vect_recog_sat_trunc_pattern): Add new func impl to recog the
.SAT_TRUNC pattern.
Signed-off-by: Pan Li <pan2.li@intel.com>
---
gcc/tree-vect-patterns.cc | 42 +++++++++++++++++++++++++++++++++++++++
1 file changed, 42 insertions(+)
Comments
Hi Pan,
Ah, so this is doing the same matching as match_unsigned_saturation_trunc, inside
the vectorizer?
Looks good to me, but I can't approve. Could you, however, also place the same comment
about what it's matching from match_unsigned_saturation_trunc into the vector one?
Thanks,
Tamar
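For context, the scalar matcher referenced above recognizes the truncate-and-mask
sequence on GIMPLE; below is a sketch of its uint64_t -> uint32_t shape,
reconstructed from the Form-1 idiom (not the literal comment text from
match_unsigned_saturation_trunc):

  /* Unsigned saturating truncation as it appears on scalar GIMPLE
     (uint64_t -> uint32_t case):

       overflow_5 = x_4(D) > 4294967295;   // wider value exceeds narrow max?
       _6 = (unsigned int) x_4(D);         // plain truncation
       _7 = (unsigned int) overflow_5;
       _8 = -_7;                           // 0, or an all-ones mask on overflow
       _9 = _6 | _8;                       // saturated result  */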
Thanks Tamar.
Looks like I missed the comment part, will update in v2.
Pan
diff --git a/gcc/tree-vect-patterns.cc b/gcc/tree-vect-patterns.cc
index 519d15f2a43..802c5d0f7c8 100644
--- a/gcc/tree-vect-patterns.cc
+++ b/gcc/tree-vect-patterns.cc
@@ -4489,6 +4489,7 @@ vect_recog_mult_pattern (vec_info *vinfo,
 
 extern bool gimple_unsigned_integer_sat_add (tree, tree*, tree (*)(tree));
 extern bool gimple_unsigned_integer_sat_sub (tree, tree*, tree (*)(tree));
+extern bool gimple_unsigned_integer_sat_trunc (tree, tree*, tree (*)(tree));
 
 static gimple *
 vect_recog_build_binary_gimple_stmt (vec_info *vinfo, stmt_vec_info stmt_info,
@@ -4603,6 +4604,46 @@ vect_recog_sat_sub_pattern (vec_info *vinfo, stmt_vec_info stmt_vinfo,
   return NULL;
 }
 
+static gimple *
+vect_recog_sat_trunc_pattern (vec_info *vinfo, stmt_vec_info stmt_vinfo,
+			      tree *type_out)
+{
+  gimple *last_stmt = STMT_VINFO_STMT (stmt_vinfo);
+
+  if (!is_gimple_assign (last_stmt))
+    return NULL;
+
+  tree ops[1];
+  tree lhs = gimple_assign_lhs (last_stmt);
+
+  if (gimple_unsigned_integer_sat_trunc (lhs, ops, NULL))
+    {
+      tree itype = TREE_TYPE (ops[0]);
+      tree otype = TREE_TYPE (lhs);
+      tree v_itype = get_vectype_for_scalar_type (vinfo, itype);
+      tree v_otype = get_vectype_for_scalar_type (vinfo, otype);
+      internal_fn fn = IFN_SAT_TRUNC;
+
+      if (v_itype != NULL_TREE && v_otype != NULL_TREE
+	  && direct_internal_fn_supported_p (fn, tree_pair (v_otype, v_itype),
+					     OPTIMIZE_FOR_BOTH))
+	{
+	  gcall *call = gimple_build_call_internal (fn, 1, ops[0]);
+	  tree out_ssa = vect_recog_temp_ssa_var (otype, NULL);
+
+	  gimple_call_set_lhs (call, out_ssa);
+	  gimple_call_set_nothrow (call, /* nothrow_p */ false);
+	  gimple_set_location (call, gimple_location (last_stmt));
+
+	  *type_out = v_otype;
+
+	  return call;
+	}
+    }
+
+  return NULL;
+}
+
 /* Detect a signed division by a constant that wouldn't be
    otherwise vectorized:
 
@@ -7065,6 +7106,7 @@ static vect_recog_func vect_vect_recog_func_ptrs[] = {
   { vect_recog_mult_pattern, "mult" },
   { vect_recog_sat_add_pattern, "sat_add" },
   { vect_recog_sat_sub_pattern, "sat_sub" },
+  { vect_recog_sat_trunc_pattern, "sat_trunc" },
   { vect_recog_mixed_size_cond_pattern, "mixed_size_cond" },
   { vect_recog_gcond_pattern, "gcond" },
   { vect_recog_bool_pattern, "bool" },
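To exercise the new recognizer end to end, a dejagnu-style compile test along
the lines below could be used; the dg options, dump name, and function name are
assumptions on my part, not taken from this patch:

  /* { dg-do compile } */
  /* { dg-options "-O3 -fdump-tree-optimized" } */

  #include <stdint.h>
  #include <stdbool.h>

  void
  vec_sat_u_truc_u64_to_u32 (uint32_t *x, uint64_t *y, unsigned limit)
  {
    for (unsigned i = 0; i < limit; i++)
      {
        bool overflow = y[i] > (uint64_t) (uint32_t) -1;
        x[i] = ((uint32_t) y[i]) | (uint32_t) -overflow;
      }
  }

  /* { dg-final { scan-tree-dump ".SAT_TRUNC" "optimized" } } */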