[v5] aarch64: Add vector popcount besides QImode [PR113859]

Message ID 20240618203554.20129-1-quic_pzheng@quicinc.com
State New
Headers
Series [v5] aarch64: Add vector popcount besides QImode [PR113859] |

Checks

Context Check Description
linaro-tcwg-bot/tcwg_gcc_build--master-arm success Build passed
linaro-tcwg-bot/tcwg_gcc_build--master-aarch64 success Build passed
linaro-tcwg-bot/tcwg_gcc_check--master-arm success Test passed
linaro-tcwg-bot/tcwg_gcc_check--master-aarch64 success Test passed

Commit Message

Pengxuan Zheng (QUIC) June 18, 2024, 8:35 p.m. UTC
  This patch improves GCC’s vectorization of __builtin_popcount for aarch64 target
by adding popcount patterns for vector modes besides QImode, i.e., HImode,
SImode and DImode.

With this patch, we now generate the following for V8HI:
  cnt     v1.16b, v0.16b
  uaddlp  v2.8h, v1.16b

For V4HI, we generate:
  cnt     v1.8b, v0.8b
  uaddlp  v2.4h, v1.8b

For V4SI, we generate:
  cnt     v1.16b, v0.16b
  uaddlp  v2.8h, v1.16b
  uaddlp  v3.4s, v2.8h

For V4SI with TARGET_DOTPROD, we generate the following instead:
  movi    v0.4s, #0
  movi    v1.16b, #1
  cnt     v3.16b, v2.16b
  udot    v0.4s, v3.16b, v1.16b

For V2SI, we generate:
  cnt     v1.8b, v0.8b
  uaddlp  v2.4h, v1.8b
  uaddlp  v3.2s, v2.4h

For V2SI with TARGET_DOTPROD, we generate the following instead:
  movi    v0.8b, #0
  movi    v1.8b, #1
  cnt     v3.8b, v2.8b
  udot    v0.2s, v3.8b, v1.8b

For V2DI, we generate:
  cnt     v1.16b, v0.16b
  uaddlp  v2.8h, v1.16b
  uaddlp  v3.4s, v2.8h
  uaddlp  v4.2d, v3.4s

For V2DI with TARGET_DOTPROD, we generate the following instead:
  movi    v0.4s, #0
  movi    v1.16b, #1
  cnt     v3.16b, v2.16b
  udot    v0.4s, v3.16b, v1.16b
  uaddlp  v0.2d, v0.4s

	PR target/113859

gcc/ChangeLog:

	* config/aarch64/aarch64-simd.md (aarch64_<su>addlp<mode>): Rename to...
	(@aarch64_<su>addlp<mode>): ... This.
	(popcount<mode>2): New define_expand.

gcc/testsuite/ChangeLog:

	* gcc.target/aarch64/popcnt-udot.c: New test.
	* gcc.target/aarch64/popcnt-vec.c: New test.

Signed-off-by: Pengxuan Zheng <quic_pzheng@quicinc.com>
---
 gcc/config/aarch64/aarch64-simd.md            | 51 +++++++++++++-
 .../gcc.target/aarch64/popcnt-udot.c          | 58 ++++++++++++++++
 gcc/testsuite/gcc.target/aarch64/popcnt-vec.c | 69 +++++++++++++++++++
 3 files changed, 177 insertions(+), 1 deletion(-)
 create mode 100644 gcc/testsuite/gcc.target/aarch64/popcnt-udot.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/popcnt-vec.c
  

Comments

Richard Sandiford June 27, 2024, 5:23 p.m. UTC | #1
Sorry for the slow reply.

Pengxuan Zheng <quic_pzheng@quicinc.com> writes:
> This patch improves GCC’s vectorization of __builtin_popcount for aarch64 target
> by adding popcount patterns for vector modes besides QImode, i.e., HImode,
> SImode and DImode.
>
> With this patch, we now generate the following for V8HI:
>   cnt     v1.16b, v0.16b
>   uaddlp  v2.8h, v1.16b
>
> For V4HI, we generate:
>   cnt     v1.8b, v0.8b
>   uaddlp  v2.4h, v1.8b
>
> For V4SI, we generate:
>   cnt     v1.16b, v0.16b
>   uaddlp  v2.8h, v1.16b
>   uaddlp  v3.4s, v2.8h
>
> For V4SI with TARGET_DOTPROD, we generate the following instead:
>   movi    v0.4s, #0
>   movi    v1.16b, #1
>   cnt     v3.16b, v2.16b
>   udot    v0.4s, v3.16b, v1.16b
>
> For V2SI, we generate:
>   cnt     v1.8b, v.8b
>   uaddlp  v2.4h, v1.8b
>   uaddlp  v3.2s, v2.4h
>
> For V2SI with TARGET_DOTPROD, we generate the following instead:
>   movi    v0.8b, #0
>   movi    v1.8b, #1
>   cnt     v3.8b, v2.8b
>   udot    v0.2s, v3.8b, v1.8b
>
> For V2DI, we generate:
>   cnt     v1.16b, v.16b
>   uaddlp  v2.8h, v1.16b
>   uaddlp  v3.4s, v2.8h
>   uaddlp  v4.2d, v3.4s
>
> For V2DI with TARGET_DOTPROD, we generate the following instead:
>   movi    v0.4s, #0
>   movi    v1.16b, #1
>   cnt     v3.16b, v2.16b
>   udot    v0.4s, v3.16b, v1.16b
>   uaddlp  v0.2d, v0.4s
>
> 	PR target/113859
>
> gcc/ChangeLog:
>
> 	* config/aarch64/aarch64-simd.md (aarch64_<su>addlp<mode>): Rename to...
> 	(@aarch64_<su>addlp<mode>): ... This.
> 	(popcount<mode>2): New define_expand.
>
> gcc/testsuite/ChangeLog:
>
> 	* gcc.target/aarch64/popcnt-udot.c: New test.
> 	* gcc.target/aarch64/popcnt-vec.c: New test.
>
> Signed-off-by: Pengxuan Zheng <quic_pzheng@quicinc.com>
> ---
>  gcc/config/aarch64/aarch64-simd.md            | 51 +++++++++++++-
>  .../gcc.target/aarch64/popcnt-udot.c          | 58 ++++++++++++++++
>  gcc/testsuite/gcc.target/aarch64/popcnt-vec.c | 69 +++++++++++++++++++
>  3 files changed, 177 insertions(+), 1 deletion(-)
>  create mode 100644 gcc/testsuite/gcc.target/aarch64/popcnt-udot.c
>  create mode 100644 gcc/testsuite/gcc.target/aarch64/popcnt-vec.c
>
> diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
> index 0bb39091a38..1c76123a518 100644
> --- a/gcc/config/aarch64/aarch64-simd.md
> +++ b/gcc/config/aarch64/aarch64-simd.md
> @@ -3461,7 +3461,7 @@ (define_insn "*aarch64_<su>addlv<VDQV_L:mode>_ze<GPI:mode>"
>    [(set_attr "type" "neon_reduc_add<VDQV_L:q>")]
>  )
>  
> -(define_expand "aarch64_<su>addlp<mode>"
> +(define_expand "@aarch64_<su>addlp<mode>"
>    [(set (match_operand:<VDBLW> 0 "register_operand")
>  	(plus:<VDBLW>
>  	  (vec_select:<VDBLW>
> @@ -3517,6 +3517,55 @@ (define_insn "popcount<mode>2<vczle><vczbe>"
>    [(set_attr "type" "neon_cnt<q>")]
>  )
>  
> +(define_expand "popcount<mode>2"
> +  [(set (match_operand:VDQHSD 0 "register_operand")
> +        (popcount:VDQHSD (match_operand:VDQHSD 1 "register_operand")))]
> +  "TARGET_SIMD"
> +  {
> +    /* Generate a byte popcount. */
> +    machine_mode mode = <bitsize> == 64 ? V8QImode : V16QImode;
> +    rtx tmp = gen_reg_rtx (mode);
> +    auto icode = optab_handler (popcount_optab, mode);
> +    emit_insn (GEN_FCN (icode) (tmp, gen_lowpart (mode, operands[1])));
> +
> +    if (TARGET_DOTPROD)
> +      {
> +        /* For V4SI and V2SI, we can generate a UDOT with a 0 accumulator and a
> +           1 multiplicand. For V2DI, another UAADDLP is needed. */
> +        if (<VEL>mode == SImode || <VEL>mode == DImode)

How about combining these into a single if:

  if (TARGET_DOTPROD
      && (<VEL>mode == SImode || <VEL>mode == DImode))

> +          {
> +            machine_mode dp_mode = <bitsize> == 64 ? V2SImode : V4SImode;
> +            rtx ones = force_reg (mode, CONST1_RTX (mode));
> +            rtx zeros = CONST0_RTX (dp_mode);
> +            rtx dp = gen_reg_rtx (dp_mode);
> +            auto dp_icode = optab_handler (udot_prod_optab, mode);
> +            emit_move_insn (dp, zeros);
> +            emit_insn (GEN_FCN (dp_icode) (dp, tmp, ones, dp));
> +            if (<MODE>mode == V2DImode)
> +              {
> +                emit_insn (gen_aarch64_uaddlpv4si (operands[0], dp));
> +                DONE;
> +              }
> +            emit_move_insn (operands[0], dp);
> +            DONE;
> +          }

It's minor, but I think we should write this as something like:

    {
      rtx ones = force_reg (mode, CONST1_RTX (mode));
      mode = <bitsize> == 64 ? V2SImode : V4SImode;
      rtx dest = mode == <MODE>mode ? operands[0] : gen_reg_rtx (mode);
      rtx zeros = force_reg (mode, CONST0_RTX (mode));
      auto dp_icode = optab_handler (udot_prod_optab, mode);
      emit_insn (GEN_FCN (dp_icode) (dest, tmp, ones, zeros));
      tmp = dest;
    }

so that we don't repeat the final SI->DI stage.

> +      }
> +
> +    /* Use a sequence of UADDLPs to accumulate the counts. Each step doubles
> +       the element size and halves the number of elements. */
> +    do
> +      {
> +        auto icode = code_for_aarch64_addlp (ZERO_EXTEND, GET_MODE (tmp));
> +        mode = insn_data[icode].operand[0].mode;
> +        rtx dest = mode == <MODE>mode ? operands[0] : gen_reg_rtx (mode);
> +        emit_insn (GEN_FCN (icode) (dest, tmp));
> +        tmp = dest;
> +      }
> +    while (mode != <MODE>mode);
> +    DONE;
> +  }
> +)
> +
>  ;; 'across lanes' max and min ops.
>  
>  ;; Template for outputting a scalar, so we can create __builtins which can be
> diff --git a/gcc/testsuite/gcc.target/aarch64/popcnt-udot.c b/gcc/testsuite/gcc.target/aarch64/popcnt-udot.c
> new file mode 100644
> index 00000000000..150ff746361
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/aarch64/popcnt-udot.c
> @@ -0,0 +1,58 @@
> +/* { dg-do compile } */
> +/* { dg-options "-O2 -march=armv8.2-a+dotprod -fno-vect-cost-model" } */

These tests should add -fno-schedule-insns -fno-schedule-insns2,
since nothing constrains the order of the movis wrt the other
pre-udot code.

Thanks,
Richard

> +
> +/*
> +** bar:
> +**	ldr	q([0-9]+), \[x0\]
> +**	movi	v([0-9]+).16b, 0x1
> +**	movi	v([0-9]+).4s, 0
> +**	cnt	v([0-9]+).16b, v\1.16b
> +**	udot	v\3.4s, v\4.16b, v\2.16b
> +**	str	q\3, \[x1\]
> +**	ret
> +*/
> +void
> +bar (unsigned int *__restrict b, unsigned int *__restrict d)
> +{
> +  d[0] = __builtin_popcount (b[0]);
> +  d[1] = __builtin_popcount (b[1]);
> +  d[2] = __builtin_popcount (b[2]);
> +  d[3] = __builtin_popcount (b[3]);
> +}
> +
> +/*
> +** bar1:
> +**	ldr	d([0-9]+), \[x0\]
> +**	movi	v([0-9]+).8b, 0x1
> +**	movi	v([0-9]+).2s, 0
> +**	cnt	v([0-9]+).8b, v\1.8b
> +**	udot	v\3.2s, v\4.8b, v\2.8b
> +**	str	d\3, \[x1\]
> +**	ret
> +*/
> +void
> +bar1 (unsigned int *__restrict b, unsigned int *__restrict d)
> +{
> +  d[0] = __builtin_popcount (b[0]);
> +  d[1] = __builtin_popcount (b[1]);
> +}
> +
> +/*
> +** bar2:
> +**	ldr	q([0-9]+), \[x0\]
> +**	movi	v([0-9]+).16b, 0x1
> +**	movi	v([0-9]+).4s, 0
> +**	cnt	v([0-9]+).16b, v\1.16b
> +**	udot	v\3.4s, v\4.16b, v\2.16b
> +**	uaddlp	v\3.2d, v\3.4s
> +**	str	q\3, \[x1\]
> +**	ret
> +*/
> +void
> +bar2 (unsigned long long *__restrict b, unsigned long long *__restrict d)
> +{
> +  d[0] = __builtin_popcountll (b[0]);
> +  d[1] = __builtin_popcountll (b[1]);
> +}
> +
> +/* { dg-final { check-function-bodies "**" "" "" } } */
> diff --git a/gcc/testsuite/gcc.target/aarch64/popcnt-vec.c b/gcc/testsuite/gcc.target/aarch64/popcnt-vec.c
> new file mode 100644
> index 00000000000..0c4926d7ca8
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/aarch64/popcnt-vec.c
> @@ -0,0 +1,69 @@
> +/* { dg-do compile } */
> +/* { dg-options "-O2 -fno-vect-cost-model" } */
> +
> +/* This function should produce cnt v.16b. */
> +void
> +bar (unsigned char *__restrict b, unsigned char *__restrict d)
> +{
> +  for (int i = 0; i < 1024; i++)
> +    d[i] = __builtin_popcount (b[i]);
> +}
> +
> +/* This function should produce cnt v.16b and uaddlp (Add Long Pairwise). */
> +void
> +bar1 (unsigned short *__restrict b, unsigned short *__restrict d)
> +{
> +  for (int i = 0; i < 1024; i++)
> +    d[i] = __builtin_popcount (b[i]);
> +}
> +
> +/* This function should produce cnt v.16b and 2 uaddlp (Add Long Pairwise). */
> +void
> +bar2 (unsigned int *__restrict b, unsigned int *__restrict d)
> +{
> +  for (int i = 0; i < 1024; i++)
> +    d[i] = __builtin_popcount (b[i]);
> +}
> +
> +/* This function should produce cnt v.16b and 3 uaddlp (Add Long Pairwise). */
> +void
> +bar3 (unsigned long long *__restrict b, unsigned long long *__restrict d)
> +{
> +  for (int i = 0; i < 1024; i++)
> +    d[i] = __builtin_popcountll (b[i]);
> +}
> +
> +/* SLP
> +   This function should produce cnt v.8b and uaddlp (Add Long Pairwise). */
> +void
> +bar4 (unsigned short *__restrict b, unsigned short *__restrict d)
> +{
> +  d[0] = __builtin_popcount (b[0]);
> +  d[1] = __builtin_popcount (b[1]);
> +  d[2] = __builtin_popcount (b[2]);
> +  d[3] = __builtin_popcount (b[3]);
> +}
> +
> +/* SLP
> +   This function should produce cnt v.8b and 2 uaddlp (Add Long Pairwise). */
> +void
> +bar5 (unsigned int *__restrict b, unsigned int *__restrict d)
> +{
> +  d[0] = __builtin_popcount (b[0]);
> +  d[1] = __builtin_popcount (b[1]);
> +}
> +
> +/* SLP
> +   This function should produce cnt v.16b and 3 uaddlp (Add Long Pairwise). */
> +void
> +bar6 (unsigned long long *__restrict b, unsigned long long *__restrict d)
> +{
> +  d[0] = __builtin_popcountll (b[0]);
> +  d[1] = __builtin_popcountll (b[1]);
> +}
> +
> +/* { dg-final { scan-assembler-not {\tbl\tpopcount} } } */
> +/* { dg-final { scan-assembler-times {cnt\t} 7 } } */
> +/* { dg-final { scan-assembler-times {uaddlp\t} 12 } } */
> +/* { dg-final { scan-assembler-times {ldr\tq} 5 } } */
> +/* { dg-final { scan-assembler-times {ldr\td} 2 } } */
  
Pengxuan Zheng (QUIC) June 28, 2024, 12:55 a.m. UTC | #2
Thanks, Richard! I've updated the patch accordingly.

https://gcc.gnu.org/pipermail/gcc-patches/2024-June/655912.html

Please let me know if any other changes are needed.

Thanks,
Pengxuan
> Sorry for the slow reply.
> 
> Pengxuan Zheng <quic_pzheng@quicinc.com> writes:
> > This patch improves GCC’s vectorization of __builtin_popcount for
> > aarch64 target by adding popcount patterns for vector modes besides
> > QImode, i.e., HImode, SImode and DImode.
> >
> > With this patch, we now generate the following for V8HI:
> >   cnt     v1.16b, v0.16b
> >   uaddlp  v2.8h, v1.16b
> >
> > For V4HI, we generate:
> >   cnt     v1.8b, v0.8b
> >   uaddlp  v2.4h, v1.8b
> >
> > For V4SI, we generate:
> >   cnt     v1.16b, v0.16b
> >   uaddlp  v2.8h, v1.16b
> >   uaddlp  v3.4s, v2.8h
> >
> > For V4SI with TARGET_DOTPROD, we generate the following instead:
> >   movi    v0.4s, #0
> >   movi    v1.16b, #1
> >   cnt     v3.16b, v2.16b
> >   udot    v0.4s, v3.16b, v1.16b
> >
> > For V2SI, we generate:
> >   cnt     v1.8b, v.8b
> >   uaddlp  v2.4h, v1.8b
> >   uaddlp  v3.2s, v2.4h
> >
> > For V2SI with TARGET_DOTPROD, we generate the following instead:
> >   movi    v0.8b, #0
> >   movi    v1.8b, #1
> >   cnt     v3.8b, v2.8b
> >   udot    v0.2s, v3.8b, v1.8b
> >
> > For V2DI, we generate:
> >   cnt     v1.16b, v.16b
> >   uaddlp  v2.8h, v1.16b
> >   uaddlp  v3.4s, v2.8h
> >   uaddlp  v4.2d, v3.4s
> >
> > For V2DI with TARGET_DOTPROD, we generate the following instead:
> >   movi    v0.4s, #0
> >   movi    v1.16b, #1
> >   cnt     v3.16b, v2.16b
> >   udot    v0.4s, v3.16b, v1.16b
> >   uaddlp  v0.2d, v0.4s
> >
> > 	PR target/113859
> >
> > gcc/ChangeLog:
> >
> > 	* config/aarch64/aarch64-simd.md (aarch64_<su>addlp<mode>):
> Rename to...
> > 	(@aarch64_<su>addlp<mode>): ... This.
> > 	(popcount<mode>2): New define_expand.
> >
> > gcc/testsuite/ChangeLog:
> >
> > 	* gcc.target/aarch64/popcnt-udot.c: New test.
> > 	* gcc.target/aarch64/popcnt-vec.c: New test.
> >
> > Signed-off-by: Pengxuan Zheng <quic_pzheng@quicinc.com>
> > ---
> >  gcc/config/aarch64/aarch64-simd.md            | 51 +++++++++++++-
> >  .../gcc.target/aarch64/popcnt-udot.c          | 58 ++++++++++++++++
> >  gcc/testsuite/gcc.target/aarch64/popcnt-vec.c | 69
> > +++++++++++++++++++
> >  3 files changed, 177 insertions(+), 1 deletion(-)  create mode 100644
> > gcc/testsuite/gcc.target/aarch64/popcnt-udot.c
> >  create mode 100644 gcc/testsuite/gcc.target/aarch64/popcnt-vec.c
> >
> > diff --git a/gcc/config/aarch64/aarch64-simd.md
> > b/gcc/config/aarch64/aarch64-simd.md
> > index 0bb39091a38..1c76123a518 100644
> > --- a/gcc/config/aarch64/aarch64-simd.md
> > +++ b/gcc/config/aarch64/aarch64-simd.md
> > @@ -3461,7 +3461,7 @@ (define_insn
> "*aarch64_<su>addlv<VDQV_L:mode>_ze<GPI:mode>"
> >    [(set_attr "type" "neon_reduc_add<VDQV_L:q>")]
> >  )
> >
> > -(define_expand "aarch64_<su>addlp<mode>"
> > +(define_expand "@aarch64_<su>addlp<mode>"
> >    [(set (match_operand:<VDBLW> 0 "register_operand")
> >  	(plus:<VDBLW>
> >  	  (vec_select:<VDBLW>
> > @@ -3517,6 +3517,55 @@ (define_insn
> "popcount<mode>2<vczle><vczbe>"
> >    [(set_attr "type" "neon_cnt<q>")]
> >  )
> >
> > +(define_expand "popcount<mode>2"
> > +  [(set (match_operand:VDQHSD 0 "register_operand")
> > +        (popcount:VDQHSD (match_operand:VDQHSD 1
> > +"register_operand")))]
> > +  "TARGET_SIMD"
> > +  {
> > +    /* Generate a byte popcount. */
> > +    machine_mode mode = <bitsize> == 64 ? V8QImode : V16QImode;
> > +    rtx tmp = gen_reg_rtx (mode);
> > +    auto icode = optab_handler (popcount_optab, mode);
> > +    emit_insn (GEN_FCN (icode) (tmp, gen_lowpart (mode,
> > +operands[1])));
> > +
> > +    if (TARGET_DOTPROD)
> > +      {
> > +        /* For V4SI and V2SI, we can generate a UDOT with a 0 accumulator
> and a
> > +           1 multiplicand. For V2DI, another UAADDLP is needed. */
> > +        if (<VEL>mode == SImode || <VEL>mode == DImode)
> 
> How about combining these into a single if:
> 
>   if (TARGET_DOTPROD
>       && (<VEL>mode == SImode || <VEL>mode == DImode))
> 
> > +          {
> > +            machine_mode dp_mode = <bitsize> == 64 ? V2SImode : V4SImode;
> > +            rtx ones = force_reg (mode, CONST1_RTX (mode));
> > +            rtx zeros = CONST0_RTX (dp_mode);
> > +            rtx dp = gen_reg_rtx (dp_mode);
> > +            auto dp_icode = optab_handler (udot_prod_optab, mode);
> > +            emit_move_insn (dp, zeros);
> > +            emit_insn (GEN_FCN (dp_icode) (dp, tmp, ones, dp));
> > +            if (<MODE>mode == V2DImode)
> > +              {
> > +                emit_insn (gen_aarch64_uaddlpv4si (operands[0], dp));
> > +                DONE;
> > +              }
> > +            emit_move_insn (operands[0], dp);
> > +            DONE;
> > +          }
> 
> It's minor, but I think we should write this as something like:
> 
>     {
>       rtx ones = force_reg (mode, CONST1_RTX (mode));
>       mode = <bitsize> == 64 ? V2SImode : V4SImode;
>       rtx dest = mode == <MODE>mode ? operands[0] : gen_reg_rtx (mode);
>       rtx zeros = force_reg (mode, CONST0_RTX (mode));
>       auto dp_icode = optab_handler (udot_prod_optab, mode);
>       emit_insn (GEN_FCN (dp_icode) (dest, tmp, ones, zeros));
>       tmp = dest;
>     }
> 
> so that we don't repeat the final SI->DI stage.
> 
> > +      }
> > +
> > +    /* Use a sequence of UADDLPs to accumulate the counts. Each step
> doubles
> > +       the element size and halves the number of elements. */
> > +    do
> > +      {
> > +        auto icode = code_for_aarch64_addlp (ZERO_EXTEND, GET_MODE
> (tmp));
> > +        mode = insn_data[icode].operand[0].mode;
> > +        rtx dest = mode == <MODE>mode ? operands[0] : gen_reg_rtx
> (mode);
> > +        emit_insn (GEN_FCN (icode) (dest, tmp));
> > +        tmp = dest;
> > +      }
> > +    while (mode != <MODE>mode);
> > +    DONE;
> > +  }
> > +)
> > +
> >  ;; 'across lanes' max and min ops.
> >
> >  ;; Template for outputting a scalar, so we can create __builtins
> > which can be diff --git
> > a/gcc/testsuite/gcc.target/aarch64/popcnt-udot.c
> > b/gcc/testsuite/gcc.target/aarch64/popcnt-udot.c
> > new file mode 100644
> > index 00000000000..150ff746361
> > --- /dev/null
> > +++ b/gcc/testsuite/gcc.target/aarch64/popcnt-udot.c
> > @@ -0,0 +1,58 @@
> > +/* { dg-do compile } */
> > +/* { dg-options "-O2 -march=armv8.2-a+dotprod -fno-vect-cost-model" }
> > +*/
> 
> These tests should add -fno-schedule-insns -fno-schedule-insns2, since
> nothing constrains the order of the movis wrt the other pre-udot code.
> 
> Thanks,
> Richard
> 
> > +
> > +/*
> > +** bar:
> > +**	ldr	q([0-9]+), \[x0\]
> > +**	movi	v([0-9]+).16b, 0x1
> > +**	movi	v([0-9]+).4s, 0
> > +**	cnt	v([0-9]+).16b, v\1.16b
> > +**	udot	v\3.4s, v\4.16b, v\2.16b
> > +**	str	q\3, \[x1\]
> > +**	ret
> > +*/
> > +void
> > +bar (unsigned int *__restrict b, unsigned int *__restrict d) {
> > +  d[0] = __builtin_popcount (b[0]);
> > +  d[1] = __builtin_popcount (b[1]);
> > +  d[2] = __builtin_popcount (b[2]);
> > +  d[3] = __builtin_popcount (b[3]);
> > +}
> > +
> > +/*
> > +** bar1:
> > +**	ldr	d([0-9]+), \[x0\]
> > +**	movi	v([0-9]+).8b, 0x1
> > +**	movi	v([0-9]+).2s, 0
> > +**	cnt	v([0-9]+).8b, v\1.8b
> > +**	udot	v\3.2s, v\4.8b, v\2.8b
> > +**	str	d\3, \[x1\]
> > +**	ret
> > +*/
> > +void
> > +bar1 (unsigned int *__restrict b, unsigned int *__restrict d) {
> > +  d[0] = __builtin_popcount (b[0]);
> > +  d[1] = __builtin_popcount (b[1]);
> > +}
> > +
> > +/*
> > +** bar2:
> > +**	ldr	q([0-9]+), \[x0\]
> > +**	movi	v([0-9]+).16b, 0x1
> > +**	movi	v([0-9]+).4s, 0
> > +**	cnt	v([0-9]+).16b, v\1.16b
> > +**	udot	v\3.4s, v\4.16b, v\2.16b
> > +**	uaddlp	v\3.2d, v\3.4s
> > +**	str	q\3, \[x1\]
> > +**	ret
> > +*/
> > +void
> > +bar2 (unsigned long long *__restrict b, unsigned long long
> > +*__restrict d) {
> > +  d[0] = __builtin_popcountll (b[0]);
> > +  d[1] = __builtin_popcountll (b[1]); }
> > +
> > +/* { dg-final { check-function-bodies "**" "" "" } } */
> > diff --git a/gcc/testsuite/gcc.target/aarch64/popcnt-vec.c
> > b/gcc/testsuite/gcc.target/aarch64/popcnt-vec.c
> > new file mode 100644
> > index 00000000000..0c4926d7ca8
> > --- /dev/null
> > +++ b/gcc/testsuite/gcc.target/aarch64/popcnt-vec.c
> > @@ -0,0 +1,69 @@
> > +/* { dg-do compile } */
> > +/* { dg-options "-O2 -fno-vect-cost-model" } */
> > +
> > +/* This function should produce cnt v.16b. */ void bar (unsigned char
> > +*__restrict b, unsigned char *__restrict d) {
> > +  for (int i = 0; i < 1024; i++)
> > +    d[i] = __builtin_popcount (b[i]); }
> > +
> > +/* This function should produce cnt v.16b and uaddlp (Add Long
> > +Pairwise). */ void
> > +bar1 (unsigned short *__restrict b, unsigned short *__restrict d) {
> > +  for (int i = 0; i < 1024; i++)
> > +    d[i] = __builtin_popcount (b[i]); }
> > +
> > +/* This function should produce cnt v.16b and 2 uaddlp (Add Long
> > +Pairwise). */ void
> > +bar2 (unsigned int *__restrict b, unsigned int *__restrict d) {
> > +  for (int i = 0; i < 1024; i++)
> > +    d[i] = __builtin_popcount (b[i]); }
> > +
> > +/* This function should produce cnt v.16b and 3 uaddlp (Add Long
> > +Pairwise). */ void
> > +bar3 (unsigned long long *__restrict b, unsigned long long
> > +*__restrict d) {
> > +  for (int i = 0; i < 1024; i++)
> > +    d[i] = __builtin_popcountll (b[i]); }
> > +
> > +/* SLP
> > +   This function should produce cnt v.8b and uaddlp (Add Long
> > +Pairwise). */ void
> > +bar4 (unsigned short *__restrict b, unsigned short *__restrict d) {
> > +  d[0] = __builtin_popcount (b[0]);
> > +  d[1] = __builtin_popcount (b[1]);
> > +  d[2] = __builtin_popcount (b[2]);
> > +  d[3] = __builtin_popcount (b[3]);
> > +}
> > +
> > +/* SLP
> > +   This function should produce cnt v.8b and 2 uaddlp (Add Long
> > +Pairwise). */ void
> > +bar5 (unsigned int *__restrict b, unsigned int *__restrict d) {
> > +  d[0] = __builtin_popcount (b[0]);
> > +  d[1] = __builtin_popcount (b[1]);
> > +}
> > +
> > +/* SLP
> > +   This function should produce cnt v.16b and 3 uaddlp (Add Long
> > +Pairwise). */ void
> > +bar6 (unsigned long long *__restrict b, unsigned long long
> > +*__restrict d) {
> > +  d[0] = __builtin_popcountll (b[0]);
> > +  d[1] = __builtin_popcountll (b[1]); }
> > +
> > +/* { dg-final { scan-assembler-not {\tbl\tpopcount} } } */
> > +/* { dg-final { scan-assembler-times {cnt\t} 7 } } */
> > +/* { dg-final { scan-assembler-times {uaddlp\t} 12 } } */
> > +/* { dg-final { scan-assembler-times {ldr\tq} 5 } } */
> > +/* { dg-final { scan-assembler-times {ldr\td} 2 } } */
  

Patch

diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 0bb39091a38..1c76123a518 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -3461,7 +3461,7 @@  (define_insn "*aarch64_<su>addlv<VDQV_L:mode>_ze<GPI:mode>"
   [(set_attr "type" "neon_reduc_add<VDQV_L:q>")]
 )
 
-(define_expand "aarch64_<su>addlp<mode>"
+(define_expand "@aarch64_<su>addlp<mode>"
   [(set (match_operand:<VDBLW> 0 "register_operand")
 	(plus:<VDBLW>
 	  (vec_select:<VDBLW>
@@ -3517,6 +3517,55 @@  (define_insn "popcount<mode>2<vczle><vczbe>"
   [(set_attr "type" "neon_cnt<q>")]
 )
 
+(define_expand "popcount<mode>2"
+  [(set (match_operand:VDQHSD 0 "register_operand")
+        (popcount:VDQHSD (match_operand:VDQHSD 1 "register_operand")))]
+  "TARGET_SIMD"
+  {
+    /* Generate a byte popcount. */
+    machine_mode mode = <bitsize> == 64 ? V8QImode : V16QImode;
+    rtx tmp = gen_reg_rtx (mode);
+    auto icode = optab_handler (popcount_optab, mode);
+    emit_insn (GEN_FCN (icode) (tmp, gen_lowpart (mode, operands[1])));
+
+    if (TARGET_DOTPROD)
+      {
+        /* For V4SI and V2SI, we can generate a UDOT with a 0 accumulator and a
+           1 multiplicand. For V2DI, another UADDLP is needed. */
+        if (<VEL>mode == SImode || <VEL>mode == DImode)
+          {
+            machine_mode dp_mode = <bitsize> == 64 ? V2SImode : V4SImode;
+            rtx ones = force_reg (mode, CONST1_RTX (mode));
+            rtx zeros = CONST0_RTX (dp_mode);
+            rtx dp = gen_reg_rtx (dp_mode);
+            auto dp_icode = optab_handler (udot_prod_optab, mode);
+            emit_move_insn (dp, zeros);
+            emit_insn (GEN_FCN (dp_icode) (dp, tmp, ones, dp));
+            if (<MODE>mode == V2DImode)
+              {
+                emit_insn (gen_aarch64_uaddlpv4si (operands[0], dp));
+                DONE;
+              }
+            emit_move_insn (operands[0], dp);
+            DONE;
+          }
+      }
+
+    /* Use a sequence of UADDLPs to accumulate the counts. Each step doubles
+       the element size and halves the number of elements. */
+    do
+      {
+        auto icode = code_for_aarch64_addlp (ZERO_EXTEND, GET_MODE (tmp));
+        mode = insn_data[icode].operand[0].mode;
+        rtx dest = mode == <MODE>mode ? operands[0] : gen_reg_rtx (mode);
+        emit_insn (GEN_FCN (icode) (dest, tmp));
+        tmp = dest;
+      }
+    while (mode != <MODE>mode);
+    DONE;
+  }
+)
+
 ;; 'across lanes' max and min ops.
 
 ;; Template for outputting a scalar, so we can create __builtins which can be
diff --git a/gcc/testsuite/gcc.target/aarch64/popcnt-udot.c b/gcc/testsuite/gcc.target/aarch64/popcnt-udot.c
new file mode 100644
index 00000000000..150ff746361
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/popcnt-udot.c
@@ -0,0 +1,58 @@ 
+/* { dg-do compile } */
+/* { dg-options "-O2 -march=armv8.2-a+dotprod -fno-vect-cost-model" } */
+
+/*
+** bar:
+**	ldr	q([0-9]+), \[x0\]
+**	movi	v([0-9]+).16b, 0x1
+**	movi	v([0-9]+).4s, 0
+**	cnt	v([0-9]+).16b, v\1.16b
+**	udot	v\3.4s, v\4.16b, v\2.16b
+**	str	q\3, \[x1\]
+**	ret
+*/
+void
+bar (unsigned int *__restrict b, unsigned int *__restrict d)
+{
+  d[0] = __builtin_popcount (b[0]);
+  d[1] = __builtin_popcount (b[1]);
+  d[2] = __builtin_popcount (b[2]);
+  d[3] = __builtin_popcount (b[3]);
+}
+
+/*
+** bar1:
+**	ldr	d([0-9]+), \[x0\]
+**	movi	v([0-9]+).8b, 0x1
+**	movi	v([0-9]+).2s, 0
+**	cnt	v([0-9]+).8b, v\1.8b
+**	udot	v\3.2s, v\4.8b, v\2.8b
+**	str	d\3, \[x1\]
+**	ret
+*/
+void
+bar1 (unsigned int *__restrict b, unsigned int *__restrict d)
+{
+  d[0] = __builtin_popcount (b[0]);
+  d[1] = __builtin_popcount (b[1]);
+}
+
+/*
+** bar2:
+**	ldr	q([0-9]+), \[x0\]
+**	movi	v([0-9]+).16b, 0x1
+**	movi	v([0-9]+).4s, 0
+**	cnt	v([0-9]+).16b, v\1.16b
+**	udot	v\3.4s, v\4.16b, v\2.16b
+**	uaddlp	v\3.2d, v\3.4s
+**	str	q\3, \[x1\]
+**	ret
+*/
+void
+bar2 (unsigned long long *__restrict b, unsigned long long *__restrict d)
+{
+  d[0] = __builtin_popcountll (b[0]);
+  d[1] = __builtin_popcountll (b[1]);
+}
+
+/* { dg-final { check-function-bodies "**" "" "" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/popcnt-vec.c b/gcc/testsuite/gcc.target/aarch64/popcnt-vec.c
new file mode 100644
index 00000000000..0c4926d7ca8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/popcnt-vec.c
@@ -0,0 +1,69 @@ 
+/* { dg-do compile } */
+/* { dg-options "-O2 -fno-vect-cost-model" } */
+
+/* This function should produce cnt v.16b. */
+void
+bar (unsigned char *__restrict b, unsigned char *__restrict d)
+{
+  for (int i = 0; i < 1024; i++)
+    d[i] = __builtin_popcount (b[i]);
+}
+
+/* This function should produce cnt v.16b and uaddlp (Add Long Pairwise). */
+void
+bar1 (unsigned short *__restrict b, unsigned short *__restrict d)
+{
+  for (int i = 0; i < 1024; i++)
+    d[i] = __builtin_popcount (b[i]);
+}
+
+/* This function should produce cnt v.16b and 2 uaddlp (Add Long Pairwise). */
+void
+bar2 (unsigned int *__restrict b, unsigned int *__restrict d)
+{
+  for (int i = 0; i < 1024; i++)
+    d[i] = __builtin_popcount (b[i]);
+}
+
+/* This function should produce cnt v.16b and 3 uaddlp (Add Long Pairwise). */
+void
+bar3 (unsigned long long *__restrict b, unsigned long long *__restrict d)
+{
+  for (int i = 0; i < 1024; i++)
+    d[i] = __builtin_popcountll (b[i]);
+}
+
+/* SLP
+   This function should produce cnt v.8b and uaddlp (Add Long Pairwise). */
+void
+bar4 (unsigned short *__restrict b, unsigned short *__restrict d)
+{
+  d[0] = __builtin_popcount (b[0]);
+  d[1] = __builtin_popcount (b[1]);
+  d[2] = __builtin_popcount (b[2]);
+  d[3] = __builtin_popcount (b[3]);
+}
+
+/* SLP
+   This function should produce cnt v.8b and 2 uaddlp (Add Long Pairwise). */
+void
+bar5 (unsigned int *__restrict b, unsigned int *__restrict d)
+{
+  d[0] = __builtin_popcount (b[0]);
+  d[1] = __builtin_popcount (b[1]);
+}
+
+/* SLP
+   This function should produce cnt v.16b and 3 uaddlp (Add Long Pairwise). */
+void
+bar6 (unsigned long long *__restrict b, unsigned long long *__restrict d)
+{
+  d[0] = __builtin_popcountll (b[0]);
+  d[1] = __builtin_popcountll (b[1]);
+}
+
+/* { dg-final { scan-assembler-not {\tbl\tpopcount} } } */
+/* { dg-final { scan-assembler-times {cnt\t} 7 } } */
+/* { dg-final { scan-assembler-times {uaddlp\t} 12 } } */
+/* { dg-final { scan-assembler-times {ldr\tq} 5 } } */
+/* { dg-final { scan-assembler-times {ldr\td} 2 } } */