[x86,take,#2] Doubleword version of and;cmp to not;test optimization.

Message ID 00b501d88fd9$e89e6700$b9db3500$@nextmovesoftware.com
State Committed
Commit c73e8d45ca0111f51d7187641963df97f5c9c63f
Series [x86,take,#2] Doubleword version of and;cmp to not;test optimization.

Commit Message

Roger Sayle July 4, 2022, 7:11 p.m. UTC
This patch is the latest revision of the patch originally posted at:
https://gcc.gnu.org/pipermail/gcc-patches/2022-June/596201.html

This patch extends the earlier and;cmp to not;test optimization to also
perform this transformation for TImode on TARGET_64BIT and for DImode
on -m32.  One motivation for this is that it's a step toward fixing the
current failure of gcc.target/i386/pr65105-5.c on -m32.
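
As an illustration not in the original message: on -m32 the analogous
doubleword case uses a 64-bit type, where the same idiom is a DImode
and;cmp that this patch allows to become not;test.  A hypothetical
example, mirroring the testsuite style:

/* Compile with -O2 -m32: long long is a doubleword (DImode) there,
   so (x & y) == y can use the same not;test transformation.  */
int bar(long long x, long long y)
{
  return (x & y) == y;
}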

A more direct benefit for x86_64 is that the following code:

int foo(__int128 x, __int128 y)
{
  return (x & y) == y;
}

improves with -O2 from 15 instructions:

        movq    %rdi, %r8
        movq    %rsi, %rax
        movq    %rax, %rdi
        movq    %r8, %rsi
        movq    %rdx, %r8
        andq    %rdx, %rsi
        andq    %rcx, %rdi
        movq    %rsi, %rax
        movq    %rdi, %rdx
        xorq    %r8, %rax
        xorq    %rcx, %rdx
        orq     %rdx, %rax
        sete    %al
        movzbl  %al, %eax
        ret

to the slightly better 13 instructions:

        movq    %rdi, %r8
        movq    %rsi, %rax
        movq    %r8, %rsi
        movq    %rax, %rdi
        notq    %rsi
        notq    %rdi
        andq    %rdx, %rsi
        andq    %rcx, %rdi
        movq    %rsi, %rax
        orq     %rdi, %rax
        sete    %al
        movzbl  %al, %eax
        ret
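
The transformation rests on the identity (x & y) == y <=> (~x & y) == 0
(y can only equal x & y when every set bit of y is also set in x), which
is what lets the doubleword compare against y become a test against
zero.  A minimal brute-force check of this identity, my sketch rather
than part of the patch:

#include <assert.h>

/* Verify (x & y) == y  <=>  (~x & y) == 0 over a small range.  */
int main(void)
{
  for (unsigned int x = 0; x < 256; x++)
    for (unsigned int y = 0; y < 256; y++)
      assert (((x & y) == y) == ((~x & y) == 0));
  return 0;
}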

Now that all of the doubleword pieces are already in the tree, this
patch is much shorter (an rtx_costs improvement and a single new
define_insn_and_split); however, I couldn't resist including two very
minor pattern-naming tweaks/clean-ups to fix nits.

This revised patch has been tested on x86_64-pc-linux-gnu with
make bootstrap and make -k check.  On TARGET_64BIT there are no new
failures; on --target_board=unix{-m32} there is a single new failure:
the other dg-final in gcc.target/i386/pr65105-5.c now also fails (as
that code diverges further from the expected vectorized output).  This
is progress, as both FAILs in pr65105-5.c may now be fixed by changes
localized to the STV pass.  OK for mainline?


2022-07-04  Roger Sayle  <roger@nextmovesoftware.com>

gcc/ChangeLog
        * config/i386/i386.cc (ix86_rtx_costs) <COMPARE>: Provide costs
        for double word comparisons and tests (comparisons against zero).
        * config/i386/i386.md (*test<mode>_not_doubleword): Split DWI
        and;cmp into andn;cmp $0 as a pre-reload splitter.
        (*andn<dwi>3_doubleword_bmi): Use <dwi> instead of <mode> in name.
        (*<any_or><dwi>3_doubleword): Likewise.

gcc/testsuite/ChangeLog
        * gcc.target/i386/testnot-3.c: New test case.


Thanks in advance,
Roger
--
  

Comments

Uros Bizjak July 5, 2022, 7:56 a.m. UTC | #1
On Mon, Jul 4, 2022 at 9:11 PM Roger Sayle <roger@nextmovesoftware.com> wrote:
> [Roger's full message quoted above; snipped]

+;; Split and;cmp (as optimized by combine) into andn;cmp $0
+(define_insn_and_split "*test<mode>_not_doubleword"
+  [(set (reg:CCZ FLAGS_REG)
+       (compare:CCZ
+         (and:DWI
+           (not:DWI (match_operand:DWI 0 "nonimmediate_operand"))
+           (match_operand:DWI 1 "nonimmediate_operand"))
+         (const_int 0)))]
+  "ix86_pre_reload_split ()"
+  "#"
+  "&& 1"
+  [(parallel
+      [(set (match_dup 2) (and:DWI (not:DWI (match_dup 0)) (match_dup 1)))
+       (clobber (reg:CC FLAGS_REG))])
+   (set (reg:CCZ FLAGS_REG) (compare:CCZ (match_dup 2) (const_int 0)))]
 {
+  operands[0] = force_reg (<MODE>mode, operands[0]);
   operands[2] = gen_reg_rtx (<MODE>mode);
 })

I don't think we can count on a follow-up split to lower ANDN for the
!TARGET_BMI case; it is also a pre-reload splitter.

Please emit ANDN only for TARGET_BMI, and NOT/AND for !TARGET_BMI
instead.

Uros.

Uros Bizjak July 5, 2022, 8:24 a.m. UTC | #2
On Tue, Jul 5, 2022 at 9:56 AM Uros Bizjak <ubizjak@gmail.com> wrote:
>
> > [Roger's full message and the patch excerpt quoted above; snipped]
>
> I don't think we can count on a follow-up split to lower ANDN for the
> !TARGET_BMI case; it is also a pre-reload splitter.

Actually, splitters *CAN* be chained:


Splitting with gen_split_231 (i386.md:10408)
scanning new insn with uid = 30.
scanning new insn with uid = 31.
deleting insn with uid = 12.
Splitting with gen_split_235 (i386.md:10444)
scanning new insn with uid = 32.
scanning new insn with uid = 33.
deleting insn with uid = 30.
Splitting with gen_split_2 (i386.md:1510)
scanning new insn with uid = 34.
scanning new insn with uid = 35.
scanning new insn with uid = 36.
scanning new insn with uid = 37.
deleting insn with uid = 31.
deleting insn with uid = 12.

INSNs are recognized after they are split.

The patch is OK as is.

Thanks,
Uros.
  

Patch

diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc
index b15b489..70c9a27 100644
--- a/gcc/config/i386/i386.cc
+++ b/gcc/config/i386/i386.cc
@@ -20935,6 +20935,19 @@  ix86_rtx_costs (rtx x, machine_mode mode, int outer_code_i, int opno,
 	  return true;
 	}
 
+      if (SCALAR_INT_MODE_P (GET_MODE (op0))
+	  && GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
+	{
+	  if (op1 == const0_rtx)
+	    *total = cost->add
+		     + rtx_cost (op0, GET_MODE (op0), outer_code, opno, speed);
+	  else
+	    *total = 3*cost->add
+		     + rtx_cost (op0, GET_MODE (op0), outer_code, opno, speed)
+		     + rtx_cost (op1, GET_MODE (op0), outer_code, opno, speed);
+	  return true;
+	}
+
       /* The embedded comparison operand is completely free.  */
       if (!general_operand (op0, GET_MODE (op0)) && op1 == const0_rtx)
 	*total = 0;
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index 20c3b9a..2492ad4 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -9792,7 +9792,25 @@ 
    (set (reg:CCZ FLAGS_REG)
 	(compare:CCZ (and:SWI (match_dup 2) (match_dup 1))
 		     (const_int 0)))]
+  "operands[2] = gen_reg_rtx (<MODE>mode);")
+
+;; Split and;cmp (as optimized by combine) into andn;cmp $0
+(define_insn_and_split "*test<mode>_not_doubleword"
+  [(set (reg:CCZ FLAGS_REG)
+	(compare:CCZ
+	  (and:DWI
+	    (not:DWI (match_operand:DWI 0 "nonimmediate_operand"))
+	    (match_operand:DWI 1 "nonimmediate_operand"))
+	  (const_int 0)))]
+  "ix86_pre_reload_split ()"
+  "#"
+  "&& 1"
+  [(parallel
+      [(set (match_dup 2) (and:DWI (not:DWI (match_dup 0)) (match_dup 1)))
+       (clobber (reg:CC FLAGS_REG))])
+   (set (reg:CCZ FLAGS_REG) (compare:CCZ (match_dup 2) (const_int 0)))]
 {
+  operands[0] = force_reg (<MODE>mode, operands[0]);
   operands[2] = gen_reg_rtx (<MODE>mode);
 })
 
@@ -10404,7 +10422,7 @@ 
   operands[2] = gen_int_mode (INTVAL (operands[2]), QImode);
 })
 
-(define_insn_and_split "*andn<mode>3_doubleword_bmi"
+(define_insn_and_split "*andn<dwi>3_doubleword_bmi"
   [(set (match_operand:<DWI> 0 "register_operand" "=r")
 	(and:<DWI>
 	  (not:<DWI> (match_operand:<DWI> 1 "register_operand" "r"))
@@ -10542,7 +10560,7 @@ 
   DONE;
 })
 
-(define_insn_and_split "*<code><mode>3_doubleword"
+(define_insn_and_split "*<code><dwi>3_doubleword"
   [(set (match_operand:<DWI> 0 "nonimmediate_operand" "=ro,r")
 	(any_or:<DWI>
 	 (match_operand:<DWI> 1 "nonimmediate_operand" "%0,0")
diff --git a/gcc/testsuite/gcc.target/i386/testnot-3.c b/gcc/testsuite/gcc.target/i386/testnot-3.c
new file mode 100644
index 0000000..7c54dbc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/testnot-3.c
@@ -0,0 +1,9 @@ 
+/* { dg-do compile { target int128 } } */
+/* { dg-options "-O2" } */
+
+int foo(__int128 x, __int128 y)
+{
+  return (x & y) == y;
+}
+
+/* { dg-final { scan-assembler-not "xorq" } } */