[v2] Strip a vector load which is only used partially.
Commit Message
Here's the adjusted patch.
Ok for trunk?
Optimize
_4 = VEC_PERM_EXPR <_1, _1, { 4, 5, 6, 7, 4, 5, 6, 7 }>;
_5 = BIT_FIELD_REF <_4, 128, 0>;
to
_5 = BIT_FIELD_REF <_1, 128, 128>;
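
This shows up, for instance, for a highpart extract-and-convert such as
part1 from the new testcase:

  typedef int v16si __attribute__((vector_size(64)));
  typedef float v4sf __attribute__((vector_size(16)));

  v4sf part1 (v16si *srcp)
  {
    v16si src = *srcp;
    return (v4sf) { (float)src[4], (float)src[5],
                    (float)src[6], (float)src[7] };
  }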
gcc/ChangeLog:
PR tree-optimization/102583
* tree-ssa-forwprop.cc (simplify_bitfield_ref): Extend to handle a
contiguous stride in the VEC_PERM_EXPR.
gcc/testsuite/ChangeLog:
* gcc.target/i386/pr102583.c: New test.
* gcc.target/i386/pr92645-2.c: Adjust testcase.
* gcc.target/i386/pr92645-3.c: Ditto.
---
gcc/testsuite/gcc.target/i386/pr102583.c | 30 ++++++++
gcc/testsuite/gcc.target/i386/pr92645-2.c | 4 +-
gcc/testsuite/gcc.target/i386/pr92645-3.c | 4 +-
gcc/tree-ssa-forwprop.cc | 89 ++++++++++++++++-------
4 files changed, 96 insertions(+), 31 deletions(-)
create mode 100644 gcc/testsuite/gcc.target/i386/pr102583.c
Comments
On Mon, May 9, 2022 at 7:11 AM liuhongt via Gcc-patches
<gcc-patches@gcc.gnu.org> wrote:
>
> Here's the adjusted patch.
> Ok for trunk?
>
> Optimize
>
> _4 = VEC_PERM_EXPR <_1, _1, { 4, 5, 6, 7, 4, 5, 6, 7 }>;
> _5 = BIT_FIELD_REF <_4, 128, 0>;
>
> to
>
> _5 = BIT_FIELD_REF <_1, 128, 128>;
>
> gcc/ChangeLog:
>
> PR tree-optimization/102583
> * tree-ssa-forwprop.cc (simplify_bitfield_ref): Extend to handle a
> contiguous stride in the VEC_PERM_EXPR.
>
> gcc/testsuite/ChangeLog:
>
> * gcc.target/i386/pr102583.c: New test.
> * gcc.target/i386/pr92645-2.c: Adjust testcase.
> * gcc.target/i386/pr92645-3.c: Ditto.
> ---
> gcc/testsuite/gcc.target/i386/pr102583.c | 30 ++++++++
> gcc/testsuite/gcc.target/i386/pr92645-2.c | 4 +-
> gcc/testsuite/gcc.target/i386/pr92645-3.c | 4 +-
> gcc/tree-ssa-forwprop.cc | 89 ++++++++++++++++-------
> 4 files changed, 96 insertions(+), 31 deletions(-)
> create mode 100644 gcc/testsuite/gcc.target/i386/pr102583.c
>
> diff --git a/gcc/testsuite/gcc.target/i386/pr102583.c b/gcc/testsuite/gcc.target/i386/pr102583.c
> new file mode 100644
> index 00000000000..4ef2f296d0c
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/i386/pr102583.c
> @@ -0,0 +1,30 @@
> +/* { dg-do compile } */
> +/* { dg-options "-mavx512f -O2" } */
> +/* { dg-final { scan-assembler-times {(?n)vcvtdq2ps[ \t]+32\(%.*%ymm} 1 } } */
> +/* { dg-final { scan-assembler-times {(?n)vcvtdq2ps[ \t]+16\(%.*%xmm} 1 } } */
> +/* { dg-final { scan-assembler-times {(?n)vmovq[ \t]+16\(%.*%xmm} 1 { target { ! ia32 } } } } */
> +/* { dg-final { scan-assembler-not {(?n)vpermd[ \t]+.*%zmm} } } */
> +
> +typedef int v16si __attribute__((vector_size(64)));
> +typedef float v8sf __attribute__((vector_size(32)));
> +typedef float v4sf __attribute__((vector_size(16)));
> +typedef float v2sf __attribute__((vector_size(8)));
> +
> +v8sf part (v16si *srcp)
> +{
> + v16si src = *srcp;
> + return (v8sf) { (float)src[8], (float) src[9], (float)src[10], (float)src[11],
> + (float)src[12], (float)src[13], (float)src[14], (float)src[15] };
> +}
> +
> +v4sf part1 (v16si *srcp)
> +{
> + v16si src = *srcp;
> + return (v4sf) { (float)src[4], (float)src[5], (float)src[6], (float)src[7] };
> +}
> +
> +v2sf part2 (v16si *srcp)
> +{
> + v16si src = *srcp;
> + return (v2sf) { (float)src[4], (float)src[5] };
> +}
> diff --git a/gcc/testsuite/gcc.target/i386/pr92645-2.c b/gcc/testsuite/gcc.target/i386/pr92645-2.c
> index d34ed3aa8e5..f0608de938a 100644
> --- a/gcc/testsuite/gcc.target/i386/pr92645-2.c
> +++ b/gcc/testsuite/gcc.target/i386/pr92645-2.c
> @@ -29,6 +29,6 @@ void odd (v2si *dst, v4si *srcp)
> }
>
> /* { dg-final { scan-tree-dump-times "BIT_FIELD_REF" 4 "cddce1" } } */
> -/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 3 "cddce1" } } */
> +/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 3 "cddce1" { xfail *-*-* } } } */
> /* Ideally highpart extraction would elide the permutation as well. */
> -/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 2 "cddce1" { xfail *-*-* } } } */
> +/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 2 "cddce1" } } */
> diff --git a/gcc/testsuite/gcc.target/i386/pr92645-3.c b/gcc/testsuite/gcc.target/i386/pr92645-3.c
> index 9c08c9fb632..691011195c9 100644
> --- a/gcc/testsuite/gcc.target/i386/pr92645-3.c
> +++ b/gcc/testsuite/gcc.target/i386/pr92645-3.c
> @@ -32,6 +32,6 @@ void odd (v4sf *dst, v8si *srcp)
> /* Four conversions, on the smaller vector type, to not convert excess
> elements. */
> /* { dg-final { scan-tree-dump-times " = \\\(vector\\\(4\\\) float\\\)" 4 "cddce1" } } */
> -/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 3 "cddce1" } } */
> +/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 3 "cddce1" { xfail *-*-* } } } */
> /* Ideally highpart extraction would elide the VEC_PERM_EXPR as well. */
> -/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 2 "cddce1" { xfail *-*-* } } } */
> +/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 2 "cddce1" } } */
> diff --git a/gcc/tree-ssa-forwprop.cc b/gcc/tree-ssa-forwprop.cc
> index 484491fa1c5..f91f738895d 100644
> --- a/gcc/tree-ssa-forwprop.cc
> +++ b/gcc/tree-ssa-forwprop.cc
> @@ -2334,8 +2334,10 @@ simplify_bitfield_ref (gimple_stmt_iterator *gsi)
> gimple *stmt = gsi_stmt (*gsi);
> gimple *def_stmt;
> tree op, op0, op1;
> - tree elem_type;
> - unsigned idx, size;
> + tree elem_type, type;
> + tree p, m, tem;
> + unsigned HOST_WIDE_INT nelts;
> + unsigned idx, size, elem_size;
> enum tree_code code;
>
> op = gimple_assign_rhs1 (stmt);
> @@ -2353,42 +2355,75 @@ simplify_bitfield_ref (gimple_stmt_iterator *gsi)
> op1 = TREE_OPERAND (op, 1);
> code = gimple_assign_rhs_code (def_stmt);
> elem_type = TREE_TYPE (TREE_TYPE (op0));
> - if (TREE_TYPE (op) != elem_type)
> + type = TREE_TYPE (op);
> + /* Also handle vector type,
> + i.e.
> + _7 = VEC_PERM_EXPR <_1, _1, { 2, 3, 2, 3 }>;
> + _11 = BIT_FIELD_REF <_7, 64, 0>;
> +
> + to
> +
> + _11 = BIT_FIELD_REF <_1, 64, 64>. */
> + if (type != elem_type
> + && (!VECTOR_TYPE_P (type) || TREE_TYPE (type) != elem_type))
> return false;
I'm not sure we actually need this check. I think we could handle
a __int128_t extract from the permuted vector as well, no?
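For example (just a sketch, with _1 a vector(8) int and _5 of scalar type
__int128):

  _4 = VEC_PERM_EXPR <_1, _1, { 4, 5, 6, 7, 4, 5, 6, 7 }>;
  _5 = BIT_FIELD_REF <_4, 128, 0>;

could become _5 = BIT_FIELD_REF <_1, 128, 128> just the same.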
>
> - size = TREE_INT_CST_LOW (TYPE_SIZE (elem_type));
> + elem_size = size = TREE_INT_CST_LOW (TYPE_SIZE (type));
That's not going to work with VLA vectors, I think you need to make
elem_size/size poly_uint64 and use tree_to_poly_uint64 (TYPE_SIZE (type))
here.
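Something along these lines (untested sketch):

  poly_uint64 size = tree_to_poly_uint64 (TYPE_SIZE (type));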
> if (maybe_ne (bit_field_size (op), size))
> return false;
>
> - if (code == VEC_PERM_EXPR
> - && constant_multiple_p (bit_field_offset (op), size, &idx))
> + if (code != VEC_PERM_EXPR
> + || !constant_multiple_p (bit_field_offset (op), size, &idx))
That might be a bit restrictive? I think we want elem_size here
with elem_size picked from the vector type of the VEC_PERM_EXPR
operand.
> + return false;
> +
> + m = gimple_assign_rhs3 (def_stmt);
> + if (TREE_CODE (m) != VECTOR_CST
> + || !VECTOR_CST_NELTS (m).is_constant (&nelts))
> + return false;
> +
> + /* One element. */
> + if (type == elem_type)
I think this should work on the sizes, so
if (known_eq (size, elem_size))
...
that makes sure we do not end up with nelts_op == 1 below and also
handle the case where the BIT_FIELD_REF would do int<->float
punning.
> + idx = TREE_INT_CST_LOW (VECTOR_CST_ELT (m, idx));
> + else
> {
> - tree p, m, tem;
> - unsigned HOST_WIDE_INT nelts;
> - m = gimple_assign_rhs3 (def_stmt);
> - if (TREE_CODE (m) != VECTOR_CST
> - || !VECTOR_CST_NELTS (m).is_constant (&nelts))
> + elem_size = TREE_INT_CST_LOW (TYPE_SIZE (elem_type));
> + unsigned nelts_op;
> + if (!constant_multiple_p (bit_field_size (op), elem_size, &nelts_op))
bit_field_size (op) == size? I think we also want to handle
power-of-two nelts_op
only (I can't think of how we get a non-power-of-two, but checking
pow2p_hwi (nelts_op)
would be nice if only for documentation purposes)
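I.e. something like (sketch only):

  unsigned HOST_WIDE_INT nelts_op;
  if (!constant_multiple_p (size, elem_size, &nelts_op)
      || !pow2p_hwi (nelts_op))
    return false;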
> return false;
> - idx = TREE_INT_CST_LOW (VECTOR_CST_ELT (m, idx));
> - idx %= 2 * nelts;
> - if (idx < nelts)
> - {
> - p = gimple_assign_rhs1 (def_stmt);
> - }
> - else
> + unsigned start = TREE_INT_CST_LOW (vector_cst_elt (m, idx));
> + unsigned end = TREE_INT_CST_LOW (vector_cst_elt (m, idx + nelts_op - 1));
> + /* The extracted range must be within a single input vector. */
> + if ((start < nelts) != (end < nelts))
> + return false;
> + for (unsigned HOST_WIDE_INT i = 1; i != nelts_op; i++)
> {
> - p = gimple_assign_rhs2 (def_stmt);
> - idx -= nelts;
> + /* Contiguous area. */
> + if (TREE_INT_CST_LOW (vector_cst_elt (m, idx + i)) - 1
> + != TREE_INT_CST_LOW (vector_cst_elt (m, idx + i - 1)))
> + return false;
> }
> - tem = build3 (BIT_FIELD_REF, TREE_TYPE (op),
> - unshare_expr (p), op1, bitsize_int (idx * size));
> - gimple_assign_set_rhs1 (stmt, tem);
> - fold_stmt (gsi);
> - update_stmt (gsi_stmt (*gsi));
> - return true;
> + /* Alignment not worse than before. */
> + unsigned dest_align = TREE_INT_CST_LOW (TYPE_SIZE (type));
> + if (start * elem_size % dest_align)
> + return false;
So I think we want to have start * elem_size be a multiple of size
(the check I said above is too restrictive on the _original_ bit-field-ref but
we want to have it for the target bit-field-ref). I think that's what you check
here but see above for the use of poly-ints.
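I.e. with poly-ints, roughly:

  if (!multiple_p (start * elem_size, size))
    return false;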
> + idx = start;
> }
>
> - return false;
> + idx %= 2 * nelts;
that's odd - why do you need to clamp idx?
> + if (idx < nelts)
> + p = gimple_assign_rhs1 (def_stmt);
> + else
> + {
> + p = gimple_assign_rhs2 (def_stmt);
> + idx -= nelts;
> + }
> +
> + tem = build3 (BIT_FIELD_REF, TREE_TYPE (op),
> + unshare_expr (p), op1, bitsize_int (idx * elem_size));
You don't need to unshare_expr 'p' here.
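I.e. just:

  tem = build3 (BIT_FIELD_REF, TREE_TYPE (op), p, op1,
                bitsize_int (idx * elem_size));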
Otherwise looks OK.
Thanks,
Richard.
> + gimple_assign_set_rhs1 (stmt, tem);
> + fold_stmt (gsi);
> + update_stmt (gsi_stmt (*gsi));
> + return true;
> }
>
> /* Determine whether applying the 2 permutations (mask1 then mask2)
> --
> 2.18.1
>
On Tue, May 10, 2022 at 2:54 PM Richard Biener via Gcc-patches
<gcc-patches@gcc.gnu.org> wrote:
>
> On Mon, May 9, 2022 at 7:11 AM liuhongt via Gcc-patches
> <gcc-patches@gcc.gnu.org> wrote:
> >
> > Here's the adjusted patch.
> > Ok for trunk?
> >
> > Optimize
> >
> > _4 = VEC_PERM_EXPR <_1, _1, { 4, 5, 6, 7, 4, 5, 6, 7 }>;
> > _5 = BIT_FIELD_REF <_4, 128, 0>;
> >
> > to
> >
> > _5 = BIT_FIELD_REF <_1, 128, 128>;
> >
> > gcc/ChangeLog:
> >
> > PR tree-optimization/102583
> > * tree-ssa-forwprop.cc (simplify_bitfield_ref): Extend to handle a
> > contiguous stride in the VEC_PERM_EXPR.
> >
> > gcc/testsuite/ChangeLog:
> >
> > * gcc.target/i386/pr102583.c: New test.
> > * gcc.target/i386/pr92645-2.c: Adjust testcase.
> > * gcc.target/i386/pr92645-3.c: Ditto.
> > ---
> > gcc/testsuite/gcc.target/i386/pr102583.c | 30 ++++++++
> > gcc/testsuite/gcc.target/i386/pr92645-2.c | 4 +-
> > gcc/testsuite/gcc.target/i386/pr92645-3.c | 4 +-
> > gcc/tree-ssa-forwprop.cc | 89 ++++++++++++++++-------
> > 4 files changed, 96 insertions(+), 31 deletions(-)
> > create mode 100644 gcc/testsuite/gcc.target/i386/pr102583.c
> >
> > diff --git a/gcc/testsuite/gcc.target/i386/pr102583.c b/gcc/testsuite/gcc.target/i386/pr102583.c
> > new file mode 100644
> > index 00000000000..4ef2f296d0c
> > --- /dev/null
> > +++ b/gcc/testsuite/gcc.target/i386/pr102583.c
> > @@ -0,0 +1,30 @@
> > +/* { dg-do compile } */
> > +/* { dg-options "-mavx512f -O2" } */
> > +/* { dg-final { scan-assembler-times {(?n)vcvtdq2ps[ \t]+32\(%.*%ymm} 1 } } */
> > +/* { dg-final { scan-assembler-times {(?n)vcvtdq2ps[ \t]+16\(%.*%xmm} 1 } } */
> > +/* { dg-final { scan-assembler-times {(?n)vmovq[ \t]+16\(%.*%xmm} 1 { target { ! ia32 } } } } */
> > +/* { dg-final { scan-assembler-not {(?n)vpermd[ \t]+.*%zmm} } } */
> > +
> > +typedef int v16si __attribute__((vector_size(64)));
> > +typedef float v8sf __attribute__((vector_size(32)));
> > +typedef float v4sf __attribute__((vector_size(16)));
> > +typedef float v2sf __attribute__((vector_size(8)));
> > +
> > +v8sf part (v16si *srcp)
> > +{
> > + v16si src = *srcp;
> > + return (v8sf) { (float)src[8], (float) src[9], (float)src[10], (float)src[11],
> > + (float)src[12], (float)src[13], (float)src[14], (float)src[15] };
> > +}
> > +
> > +v4sf part1 (v16si *srcp)
> > +{
> > + v16si src = *srcp;
> > + return (v4sf) { (float)src[4], (float)src[5], (float)src[6], (float)src[7] };
> > +}
> > +
> > +v2sf part2 (v16si *srcp)
> > +{
> > + v16si src = *srcp;
> > + return (v2sf) { (float)src[4], (float)src[5] };
> > +}
> > diff --git a/gcc/testsuite/gcc.target/i386/pr92645-2.c b/gcc/testsuite/gcc.target/i386/pr92645-2.c
> > index d34ed3aa8e5..f0608de938a 100644
> > --- a/gcc/testsuite/gcc.target/i386/pr92645-2.c
> > +++ b/gcc/testsuite/gcc.target/i386/pr92645-2.c
> > @@ -29,6 +29,6 @@ void odd (v2si *dst, v4si *srcp)
> > }
> >
> > /* { dg-final { scan-tree-dump-times "BIT_FIELD_REF" 4 "cddce1" } } */
> > -/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 3 "cddce1" } } */
> > +/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 3 "cddce1" { xfail *-*-* } } } */
> > /* Ideally highpart extraction would elide the permutation as well. */
> > -/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 2 "cddce1" { xfail *-*-* } } } */
> > +/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 2 "cddce1" } } */
> > diff --git a/gcc/testsuite/gcc.target/i386/pr92645-3.c b/gcc/testsuite/gcc.target/i386/pr92645-3.c
> > index 9c08c9fb632..691011195c9 100644
> > --- a/gcc/testsuite/gcc.target/i386/pr92645-3.c
> > +++ b/gcc/testsuite/gcc.target/i386/pr92645-3.c
> > @@ -32,6 +32,6 @@ void odd (v4sf *dst, v8si *srcp)
> > /* Four conversions, on the smaller vector type, to not convert excess
> > elements. */
> > /* { dg-final { scan-tree-dump-times " = \\\(vector\\\(4\\\) float\\\)" 4 "cddce1" } } */
> > -/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 3 "cddce1" } } */
> > +/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 3 "cddce1" { xfail *-*-* } } } */
> > /* Ideally highpart extraction would elide the VEC_PERM_EXPR as well. */
> > -/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 2 "cddce1" { xfail *-*-* } } } */
> > +/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 2 "cddce1" } } */
> > diff --git a/gcc/tree-ssa-forwprop.cc b/gcc/tree-ssa-forwprop.cc
> > index 484491fa1c5..f91f738895d 100644
> > --- a/gcc/tree-ssa-forwprop.cc
> > +++ b/gcc/tree-ssa-forwprop.cc
> > @@ -2334,8 +2334,10 @@ simplify_bitfield_ref (gimple_stmt_iterator *gsi)
> > gimple *stmt = gsi_stmt (*gsi);
> > gimple *def_stmt;
> > tree op, op0, op1;
> > - tree elem_type;
> > - unsigned idx, size;
> > + tree elem_type, type;
> > + tree p, m, tem;
> > + unsigned HOST_WIDE_INT nelts;
> > + unsigned idx, size, elem_size;
> > enum tree_code code;
> >
> > op = gimple_assign_rhs1 (stmt);
> > @@ -2353,42 +2355,75 @@ simplify_bitfield_ref (gimple_stmt_iterator *gsi)
> > op1 = TREE_OPERAND (op, 1);
> > code = gimple_assign_rhs_code (def_stmt);
> > elem_type = TREE_TYPE (TREE_TYPE (op0));
> > - if (TREE_TYPE (op) != elem_type)
> > + type = TREE_TYPE (op);
> > + /* Also handle vector type,
> > + i.e.
> > + _7 = VEC_PERM_EXPR <_1, _1, { 2, 3, 2, 3 }>;
> > + _11 = BIT_FIELD_REF <_7, 64, 0>;
> > +
> > + to
> > +
> > + _11 = BIT_FIELD_REF <_1, 64, 64>. */
> > + if (type != elem_type
> > + && (!VECTOR_TYPE_P (type) || TREE_TYPE (type) != elem_type))
> > return false;
>
> I'm not sure we actually need this check. I think we could handle
> a __int128_t extract from the permuted vector as well, no?
Yes, changed.
>
> >
> > - size = TREE_INT_CST_LOW (TYPE_SIZE (elem_type));
> > + elem_size = size = TREE_INT_CST_LOW (TYPE_SIZE (type));
>
> That's not going to work with VLA vectors, I think you need to make
> elem_size/size poly_uint64 and use tree_to_poly_uint64 (TYPE_SIZE (type))
> here.
>
Changed.
> > if (maybe_ne (bit_field_size (op), size))
> > return false;
> >
> > - if (code == VEC_PERM_EXPR
> > - && constant_multiple_p (bit_field_offset (op), size, &idx))
> > + if (code != VEC_PERM_EXPR
> > + || !constant_multiple_p (bit_field_offset (op), size, &idx))
>
> That might be a bit restrictive? I think we want elem_size here
> with elem_size picked from the vector type of the VEC_PERM_EXPR
> operand.
>
Changed.
> > + return false;
> > +
> > + m = gimple_assign_rhs3 (def_stmt);
> > + if (TREE_CODE (m) != VECTOR_CST
> > + || !VECTOR_CST_NELTS (m).is_constant (&nelts))
> > + return false;
> > +
> > + /* One element. */
> > + if (type == elem_type)
>
> I think this should work on the sizes, so
>
> if (known_eq (size, elem_size))
> ...
>
> that makes sure we do not end up with nelts_op == 1 below and also
> handle the case where the BIT_FIELD_REF would do int<->float
> punning.
>
Yes, also for V1TImode <-> TImode punning.
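E.g. (sketch): with _1 a vector(2) __int128 and _5 of type vector(1) __int128,

  _4 = VEC_PERM_EXPR <_1, _1, { 1, 0 }>;
  _5 = BIT_FIELD_REF <_4, 128, 0>;

has known_eq (size, elem_size) even though the types differ.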
> > + idx = TREE_INT_CST_LOW (VECTOR_CST_ELT (m, idx));
> > + else
> > {
> > - tree p, m, tem;
> > - unsigned HOST_WIDE_INT nelts;
> > - m = gimple_assign_rhs3 (def_stmt);
> > - if (TREE_CODE (m) != VECTOR_CST
> > - || !VECTOR_CST_NELTS (m).is_constant (&nelts))
> > + elem_size = TREE_INT_CST_LOW (TYPE_SIZE (elem_type));
> > + unsigned nelts_op;
> > + if (!constant_multiple_p (bit_field_size (op), elem_size, &nelts_op))
>
> bit_field_size (op) == size? I think we also want to handle
> power-of-two nelts_op
> only (I can't think of how we get a non-power-of-two, but checking
> pow2p_hwi (nelts_op)
> > would be nice if only for documentation purposes)
Added the check.
>
> > return false;
> > - idx = TREE_INT_CST_LOW (VECTOR_CST_ELT (m, idx));
> > - idx %= 2 * nelts;
> > - if (idx < nelts)
> > - {
> > - p = gimple_assign_rhs1 (def_stmt);
> > - }
> > - else
> > + unsigned start = TREE_INT_CST_LOW (vector_cst_elt (m, idx));
> > + unsigned end = TREE_INT_CST_LOW (vector_cst_elt (m, idx + nelts_op - 1));
> > + /* The extracted range must be within a single input vector. */
> > + if ((start < nelts) != (end < nelts))
> > + return false;
> > + for (unsigned HOST_WIDE_INT i = 1; i != nelts_op; i++)
> > {
> > - p = gimple_assign_rhs2 (def_stmt);
> > - idx -= nelts;
> > + /* Contiguous area. */
> > + if (TREE_INT_CST_LOW (vector_cst_elt (m, idx + i)) - 1
> > + != TREE_INT_CST_LOW (vector_cst_elt (m, idx + i - 1)))
> > + return false;
> > }
> > - tem = build3 (BIT_FIELD_REF, TREE_TYPE (op),
> > - unshare_expr (p), op1, bitsize_int (idx * size));
> > - gimple_assign_set_rhs1 (stmt, tem);
> > - fold_stmt (gsi);
> > - update_stmt (gsi_stmt (*gsi));
> > - return true;
> > + /* Alignment not worse than before. */
> > + unsigned dest_align = TREE_INT_CST_LOW (TYPE_SIZE (type));
> > + if (start * elem_size % dest_align)
> > + return false;
>
> So I think we want to have start * elem_size be a multiple of size
> (the check I said above is too restrictive on the _original_ bit-field-ref but
> > we want to have it for the target bit-field-ref). I think that's what you check
> here but see above for the use of poly-ints.
Changed.
>
> > + idx = start;
> > }
> >
> > - return false;
> > + idx %= 2 * nelts;
>
> that's odd - why do you need to clamp idx?
Changed.
This is just copied over from the original code; it looks like it's not needed
any more. Similarly for your comment below.
>
> > + if (idx < nelts)
> > + p = gimple_assign_rhs1 (def_stmt);
> > + else
> > + {
> > + p = gimple_assign_rhs2 (def_stmt);
> > + idx -= nelts;
> > + }
> > +
> > + tem = build3 (BIT_FIELD_REF, TREE_TYPE (op),
> > + unshare_expr (p), op1, bitsize_int (idx * elem_size));
>
> You don't need to unshare_expr 'p' here.
>
> Otherwise looks OK.
>
Here's the updated patch.
> Thanks,
> Richard.
>
> > + gimple_assign_set_rhs1 (stmt, tem);
> > + fold_stmt (gsi);
> > + update_stmt (gsi_stmt (*gsi));
> > + return true;
> > }
> >
> > /* Determine whether applying the 2 permutations (mask1 then mask2)
> > --
> > 2.18.1
> >
On Thu, May 12, 2022 at 3:48 AM Hongtao Liu <crazylht@gmail.com> wrote:
>
> On Tue, May 10, 2022 at 2:54 PM Richard Biener via Gcc-patches
> <gcc-patches@gcc.gnu.org> wrote:
> >
> > On Mon, May 9, 2022 at 7:11 AM liuhongt via Gcc-patches
> > <gcc-patches@gcc.gnu.org> wrote:
> > >
> > > Here's the adjusted patch.
> > > Ok for trunk?
> > >
> > > Optimize
> > >
> > > _4 = VEC_PERM_EXPR <_1, _1, { 4, 5, 6, 7, 4, 5, 6, 7 }>;
> > > _5 = BIT_FIELD_REF <_4, 128, 0>;
> > >
> > > to
> > >
> > > _5 = BIT_FIELD_REF <_1, 128, 128>;
> > >
> > > gcc/ChangeLog:
> > >
> > > PR tree-optimization/102583
> > > * tree-ssa-forwprop.cc (simplify_bitfield_ref): Extend to handle a
> > > contiguous stride in the VEC_PERM_EXPR.
> > >
> > > gcc/testsuite/ChangeLog:
> > >
> > > * gcc.target/i386/pr102583.c: New test.
> > > * gcc.target/i386/pr92645-2.c: Adjust testcase.
> > > * gcc.target/i386/pr92645-3.c: Ditto.
> > > ---
> > > gcc/testsuite/gcc.target/i386/pr102583.c | 30 ++++++++
> > > gcc/testsuite/gcc.target/i386/pr92645-2.c | 4 +-
> > > gcc/testsuite/gcc.target/i386/pr92645-3.c | 4 +-
> > > gcc/tree-ssa-forwprop.cc | 89 ++++++++++++++++-------
> > > 4 files changed, 96 insertions(+), 31 deletions(-)
> > > create mode 100644 gcc/testsuite/gcc.target/i386/pr102583.c
> > >
> > > diff --git a/gcc/testsuite/gcc.target/i386/pr102583.c b/gcc/testsuite/gcc.target/i386/pr102583.c
> > > new file mode 100644
> > > index 00000000000..4ef2f296d0c
> > > --- /dev/null
> > > +++ b/gcc/testsuite/gcc.target/i386/pr102583.c
> > > @@ -0,0 +1,30 @@
> > > +/* { dg-do compile } */
> > > +/* { dg-options "-mavx512f -O2" } */
> > > +/* { dg-final { scan-assembler-times {(?n)vcvtdq2ps[ \t]+32\(%.*%ymm} 1 } } */
> > > +/* { dg-final { scan-assembler-times {(?n)vcvtdq2ps[ \t]+16\(%.*%xmm} 1 } } */
> > > +/* { dg-final { scan-assembler-times {(?n)vmovq[ \t]+16\(%.*%xmm} 1 { target { ! ia32 } } } } */
> > > +/* { dg-final { scan-assembler-not {(?n)vpermd[ \t]+.*%zmm} } } */
> > > +
> > > +typedef int v16si __attribute__((vector_size(64)));
> > > +typedef float v8sf __attribute__((vector_size(32)));
> > > +typedef float v4sf __attribute__((vector_size(16)));
> > > +typedef float v2sf __attribute__((vector_size(8)));
> > > +
> > > +v8sf part (v16si *srcp)
> > > +{
> > > + v16si src = *srcp;
> > > + return (v8sf) { (float)src[8], (float) src[9], (float)src[10], (float)src[11],
> > > + (float)src[12], (float)src[13], (float)src[14], (float)src[15] };
> > > +}
> > > +
> > > +v4sf part1 (v16si *srcp)
> > > +{
> > > + v16si src = *srcp;
> > > + return (v4sf) { (float)src[4], (float)src[5], (float)src[6], (float)src[7] };
> > > +}
> > > +
> > > +v2sf part2 (v16si *srcp)
> > > +{
> > > + v16si src = *srcp;
> > > + return (v2sf) { (float)src[4], (float)src[5] };
> > > +}
> > > diff --git a/gcc/testsuite/gcc.target/i386/pr92645-2.c b/gcc/testsuite/gcc.target/i386/pr92645-2.c
> > > index d34ed3aa8e5..f0608de938a 100644
> > > --- a/gcc/testsuite/gcc.target/i386/pr92645-2.c
> > > +++ b/gcc/testsuite/gcc.target/i386/pr92645-2.c
> > > @@ -29,6 +29,6 @@ void odd (v2si *dst, v4si *srcp)
> > > }
> > >
> > > /* { dg-final { scan-tree-dump-times "BIT_FIELD_REF" 4 "cddce1" } } */
> > > -/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 3 "cddce1" } } */
> > > +/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 3 "cddce1" { xfail *-*-* } } } */
> > > /* Ideally highpart extraction would elide the permutation as well. */
> > > -/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 2 "cddce1" { xfail *-*-* } } } */
> > > +/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 2 "cddce1" } } */
> > > diff --git a/gcc/testsuite/gcc.target/i386/pr92645-3.c b/gcc/testsuite/gcc.target/i386/pr92645-3.c
> > > index 9c08c9fb632..691011195c9 100644
> > > --- a/gcc/testsuite/gcc.target/i386/pr92645-3.c
> > > +++ b/gcc/testsuite/gcc.target/i386/pr92645-3.c
> > > @@ -32,6 +32,6 @@ void odd (v4sf *dst, v8si *srcp)
> > > /* Four conversions, on the smaller vector type, to not convert excess
> > > elements. */
> > > /* { dg-final { scan-tree-dump-times " = \\\(vector\\\(4\\\) float\\\)" 4 "cddce1" } } */
> > > -/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 3 "cddce1" } } */
> > > +/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 3 "cddce1" { xfail *-*-* } } } */
> > > /* Ideally highpart extraction would elide the VEC_PERM_EXPR as well. */
> > > -/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 2 "cddce1" { xfail *-*-* } } } */
> > > +/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 2 "cddce1" } } */
> > > diff --git a/gcc/tree-ssa-forwprop.cc b/gcc/tree-ssa-forwprop.cc
> > > index 484491fa1c5..f91f738895d 100644
> > > --- a/gcc/tree-ssa-forwprop.cc
> > > +++ b/gcc/tree-ssa-forwprop.cc
> > > @@ -2334,8 +2334,10 @@ simplify_bitfield_ref (gimple_stmt_iterator *gsi)
> > > gimple *stmt = gsi_stmt (*gsi);
> > > gimple *def_stmt;
> > > tree op, op0, op1;
> > > - tree elem_type;
> > > - unsigned idx, size;
> > > + tree elem_type, type;
> > > + tree p, m, tem;
> > > + unsigned HOST_WIDE_INT nelts;
> > > + unsigned idx, size, elem_size;
> > > enum tree_code code;
> > >
> > > op = gimple_assign_rhs1 (stmt);
> > > @@ -2353,42 +2355,75 @@ simplify_bitfield_ref (gimple_stmt_iterator *gsi)
> > > op1 = TREE_OPERAND (op, 1);
> > > code = gimple_assign_rhs_code (def_stmt);
> > > elem_type = TREE_TYPE (TREE_TYPE (op0));
> > > - if (TREE_TYPE (op) != elem_type)
> > > + type = TREE_TYPE (op);
> > > + /* Also handle vector type,
> > > + i.e.
> > > + _7 = VEC_PERM_EXPR <_1, _1, { 2, 3, 2, 3 }>;
> > > + _11 = BIT_FIELD_REF <_7, 64, 0>;
> > > +
> > > + to
> > > +
> > > + _11 = BIT_FIELD_REF <_1, 64, 64>. */
> > > + if (type != elem_type
> > > + && (!VECTOR_TYPE_P (type) || TREE_TYPE (type) != elem_type))
> > > return false;
> >
> > I'm not sure we actually need this check. I think we could handle
> > a __int128_t extract from the permuted vector as well, no?
> Yes, changed.
> >
> > >
> > > - size = TREE_INT_CST_LOW (TYPE_SIZE (elem_type));
> > > + elem_size = size = TREE_INT_CST_LOW (TYPE_SIZE (type));
> >
> > That's not going to work with VLA vectors, I think you need to make
> > elem_size/size poly_uint64 and use tree_to_poly_uint64 (TYPE_SIZE (type))
> > here.
> >
> Changed.
> > > if (maybe_ne (bit_field_size (op), size))
> > > return false;
> > >
> > > - if (code == VEC_PERM_EXPR
> > > - && constant_multiple_p (bit_field_offset (op), size, &idx))
> > > + if (code != VEC_PERM_EXPR
> > > + || !constant_multiple_p (bit_field_offset (op), size, &idx))
> >
> > That might be a bit restrictive? I think we want elem_size here
> > with elem_size picked from the vector type of the VEC_PERM_EXPR
> > operand.
> >
> Changed.
> > > + return false;
> > > +
> > > + m = gimple_assign_rhs3 (def_stmt);
> > > + if (TREE_CODE (m) != VECTOR_CST
> > > + || !VECTOR_CST_NELTS (m).is_constant (&nelts))
> > > + return false;
> > > +
> > > + /* One element. */
> > > + if (type == elem_type)
> >
> > I think this should work on the sizes, so
> >
> > if (known_eq (size, elem_size))
> > ...
> >
> > that makes sure we do not end up with nelts_op == 1 below and also
> > handle the case where the BIT_FIELD_REF would do int<->float
> > punning.
> >
> Yes, also for V1TImode <-> TImode punning.
> > > + idx = TREE_INT_CST_LOW (VECTOR_CST_ELT (m, idx));
> > > + else
> > > {
> > > - tree p, m, tem;
> > > - unsigned HOST_WIDE_INT nelts;
> > > - m = gimple_assign_rhs3 (def_stmt);
> > > - if (TREE_CODE (m) != VECTOR_CST
> > > - || !VECTOR_CST_NELTS (m).is_constant (&nelts))
> > > + elem_size = TREE_INT_CST_LOW (TYPE_SIZE (elem_type));
> > > + unsigned nelts_op;
> > > + if (!constant_multiple_p (bit_field_size (op), elem_size, &nelts_op))
> >
> > bit_field_size (op) == size? I think we also want to handle
> > power-of-two nelts_op
> > only (I can't think of how we get a non-power-of-two, but checking
> > pow2p_hwi (nelts_op)
> > would be nice if only for documentation purposes)
> Added the check.
> >
> > > return false;
> > > - idx = TREE_INT_CST_LOW (VECTOR_CST_ELT (m, idx));
> > > - idx %= 2 * nelts;
> > > - if (idx < nelts)
> > > - {
> > > - p = gimple_assign_rhs1 (def_stmt);
> > > - }
> > > - else
> > > + unsigned start = TREE_INT_CST_LOW (vector_cst_elt (m, idx));
> > > + unsigned end = TREE_INT_CST_LOW (vector_cst_elt (m, idx + nelts_op - 1));
> > > + /* The extracted range must be within a single input vector. */
> > > + if ((start < nelts) != (end < nelts))
> > > + return false;
> > > + for (unsigned HOST_WIDE_INT i = 1; i != nelts_op; i++)
> > > {
> > > - p = gimple_assign_rhs2 (def_stmt);
> > > - idx -= nelts;
> > > + /* Contiguous area. */
> > > + if (TREE_INT_CST_LOW (vector_cst_elt (m, idx + i)) - 1
> > > + != TREE_INT_CST_LOW (vector_cst_elt (m, idx + i - 1)))
> > > + return false;
> > > }
> > > - tem = build3 (BIT_FIELD_REF, TREE_TYPE (op),
> > > - unshare_expr (p), op1, bitsize_int (idx * size));
> > > - gimple_assign_set_rhs1 (stmt, tem);
> > > - fold_stmt (gsi);
> > > - update_stmt (gsi_stmt (*gsi));
> > > - return true;
> > > + /* Alignment not worse than before. */
> > > + unsigned dest_align = TREE_INT_CST_LOW (TYPE_SIZE (type));
> > > + if (start * elem_size % dest_align)
> > > + return false;
> >
> > So I think we want to have start * elem_size be a multiple of size
> > (the check I said above is too restrictive on the _original_ bit-field-ref but
> > we want to have it for the target bit-field-ref). I think that's what you check
> > here but see above for the use of poly-ints.
> Changed.
> >
> > > + idx = start;
> > > }
> > >
> > > - return false;
> > > + idx %= 2 * nelts;
> >
> > that's odd - why do you need to clamp idx?
> Changed.
> This is just copied over from the original code; it looks like it's not
> needed any more. Similarly for your comment below.
> >
> > > + if (idx < nelts)
> > > + p = gimple_assign_rhs1 (def_stmt);
> > > + else
> > > + {
> > > + p = gimple_assign_rhs2 (def_stmt);
> > > + idx -= nelts;
> > > + }
> > > +
> > > + tem = build3 (BIT_FIELD_REF, TREE_TYPE (op),
> > > + unshare_expr (p), op1, bitsize_int (idx * elem_size));
> >
> > You don't need to unshare_expr 'p' here.
> >
> > Otherwise looks OK.
> >
> Here's the updated patch.
Awesome.
OK for trunk if it bootstraps/tests.
Thanks,
Richard.
> > Thanks,
> > Richard.
> >
> > > + gimple_assign_set_rhs1 (stmt, tem);
> > > + fold_stmt (gsi);
> > > + update_stmt (gsi_stmt (*gsi));
> > > + return true;
> > > }
> > >
> > > /* Determine whether applying the 2 permutations (mask1 then mask2)
> > > --
> > > 2.18.1
> > >
>
>
>
> --
> BR,
> Hongtao