RISC-V: Use vmv1r.v instead of vmv.v.v for fma output reloads [PR114200].
Checks
Context |
Check |
Description |
rivoscibot/toolchain-ci-rivos-lint |
success
|
Lint passed
|
rivoscibot/toolchain-ci-rivos-apply-patch |
success
|
Patch applied
|
rivoscibot/toolchain-ci-rivos-build--newlib-rv64gcv-lp64d-multilib |
success
|
Build passed
|
rivoscibot/toolchain-ci-rivos-build--linux-rv64gcv-lp64d-multilib |
success
|
Build passed
|
linaro-tcwg-bot/tcwg_gcc_build--master-arm |
success
|
Testing passed
|
rivoscibot/toolchain-ci-rivos-build--newlib-rv64gc-lp64d-multilib |
success
|
Build passed
|
linaro-tcwg-bot/tcwg_gcc_build--master-aarch64 |
success
|
Testing passed
|
rivoscibot/toolchain-ci-rivos-build--linux-rv64gc_zba_zbb_zbc_zbs-lp64d-non-multilib |
success
|
Build passed
|
rivoscibot/toolchain-ci-rivos-build--linux-rv32gc_zba_zbb_zbc_zbs-ilp32d-non-multilib |
success
|
Build passed
|
linaro-tcwg-bot/tcwg_gcc_check--master-arm |
success
|
Testing passed
|
rivoscibot/toolchain-ci-rivos-test |
success
|
Testing passed
|
linaro-tcwg-bot/tcwg_gcc_check--master-aarch64 |
fail
|
Patch failed to apply
|
Commit Message
Hi,
three-operand instructions like vmacc are modeled with an implicit
output reload when the output does not match one of the operands. For
this we use vmv.v.v which is subject to length masking.
In a situation where the current vl is less than the full vlenb
and the fma's result value is used as input for a vector reduction
(which is never length masked) we effectively only reduce vl
elements. The masked-out elements are relevant for the
reduction, though, leading to a wrong result.
This patch replaces the vmv reloads by full-register reloads.
Regtested on rv64, rv32 is running.
Regards
Robin
gcc/ChangeLog:
PR target/114200
PR target/114202
* config/riscv/vector.md: Use vmv[1248]r.v instead of vmv.v.v.
gcc/testsuite/ChangeLog:
* gcc.target/riscv/rvv/autovec/pr114200.c: New test.
* gcc.target/riscv/rvv/autovec/pr114202.c: New test.
---
gcc/config/riscv/vector.md | 96 +++++++++----------
.../gcc.target/riscv/rvv/autovec/pr114200.c | 18 ++++
.../gcc.target/riscv/rvv/autovec/pr114202.c | 20 ++++
3 files changed, 86 insertions(+), 48 deletions(-)
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/autovec/pr114200.c
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/autovec/pr114202.c
Comments
LGTM
juzhe.zhong@rivai.ai
From: Robin Dapp
Date: 2024-03-06 21:44
To: gcc-patches; palmer; Kito Cheng; juzhe.zhong@rivai.ai
CC: rdapp.gcc; jeffreyalaw
Subject: [PATCH] RISC-V: Use vmv1r.v instead of vmv.v.v for fma output reloads [PR114200].
Hi,
three-operand instructions like vmacc are modeled with an implicit
output reload when the output does not match one of the operands. For
this we use vmv.v.v which is subject to length masking.
In a situation where the current vl is less than the full vlenb
and the fma's result value is used as input for a vector reduction
(which is never length masked) we effectively only reduce vl
elements. The masked-out elements are relevant for the
reduction, though, leading to a wrong result.
This patch replaces the vmv reloads by full-register reloads.
Regtested on rv64, rv32 is running.
Regards
Robin
gcc/ChangeLog:
PR target/114200
PR target/114202
* config/riscv/vector.md: Use vmv[1248]r.v instead of vmv.v.v.
gcc/testsuite/ChangeLog:
* gcc.target/riscv/rvv/autovec/pr114200.c: New test.
* gcc.target/riscv/rvv/autovec/pr114202.c: New test.
---
gcc/config/riscv/vector.md | 96 +++++++++----------
.../gcc.target/riscv/rvv/autovec/pr114200.c | 18 ++++
.../gcc.target/riscv/rvv/autovec/pr114202.c | 20 ++++
3 files changed, 86 insertions(+), 48 deletions(-)
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/autovec/pr114200.c
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/autovec/pr114202.c
diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md
index f89f9c2fa86..8b1c24c5d79 100644
--- a/gcc/config/riscv/vector.md
+++ b/gcc/config/riscv/vector.md
@@ -5351,10 +5351,10 @@ (define_insn "*pred_mul_plus<mode>_undef"
"@
vmadd.vv\t%0,%4,%5%p1
vmacc.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%4\;vmacc.vv\t%0,%3,%4%p1
+ vmv%m4r.v\t%0,%4\;vmacc.vv\t%0,%3,%4%p1
vmadd.vv\t%0,%4,%5%p1
vmacc.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%5\;vmacc.vv\t%0,%3,%4%p1"
+ vmv%m5r.v\t%0,%5\;vmacc.vv\t%0,%3,%4%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")])
@@ -5378,9 +5378,9 @@ (define_insn "*pred_madd<mode>"
"TARGET_VECTOR"
"@
vmadd.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vmadd.vv\t%0,%3,%4%p1
+ vmv%m2r.v\t%0,%2\;vmadd.vv\t%0,%3,%4%p1
vmadd.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vmadd.vv\t%0,%3,%4%p1"
+ vmv%m2r.v\t%0,%2\;vmadd.vv\t%0,%3,%4%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "2")
@@ -5409,9 +5409,9 @@ (define_insn "*pred_macc<mode>"
"TARGET_VECTOR"
"@
vmacc.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vmacc.vv\t%0,%2,%3%p1
+ vmv%m4r.v\t%0,%4\;vmacc.vv\t%0,%2,%3%p1
vmacc.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vmacc.vv\t%0,%2,%3%p1"
+ vmv%m4r.v\t%0,%4\;vmacc.vv\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -5462,9 +5462,9 @@ (define_insn "*pred_madd<mode>_scalar"
"TARGET_VECTOR"
"@
vmadd.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1
+ vmv%m3r.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1
vmadd.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1"
+ vmv%m3r.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "3")
@@ -5494,9 +5494,9 @@ (define_insn "*pred_macc<mode>_scalar"
"TARGET_VECTOR"
"@
vmacc.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1
+ vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1
vmacc.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1"
+ vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -5562,9 +5562,9 @@ (define_insn "*pred_madd<mode>_extended_scalar"
"TARGET_VECTOR && !TARGET_64BIT"
"@
vmadd.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1
+ vmv%m3r.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1
vmadd.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1
+ vmv%m3r.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "3")
@@ -5595,9 +5595,9 @@ (define_insn "*pred_macc<mode>_extended_scalar"
"TARGET_VECTOR && !TARGET_64BIT"
"@
vmacc.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1
+ vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1
vmacc.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1"
+ vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -5649,10 +5649,10 @@ (define_insn "*pred_minus_mul<mode>_undef"
"@
vnmsub.vv\t%0,%4,%5%p1
vnmsac.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%3\;vnmsub.vv\t%0,%4,%5%p1
+ vmv%m3r.v\t%0,%3\;vnmsub.vv\t%0,%4,%5%p1
vnmsub.vv\t%0,%4,%5%p1
vnmsac.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%3\;vnmsub.vv\t%0,%4,%5%p1"
+ vmv%m3r.v\t%0,%3\;vnmsub.vv\t%0,%4,%5%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")])
@@ -5676,9 +5676,9 @@ (define_insn "*pred_nmsub<mode>"
"TARGET_VECTOR"
"@
vnmsub.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vnmsub.vv\t%0,%3,%4%p1
+ vmv%m2r.v\t%0,%2\;vnmsub.vv\t%0,%3,%4%p1
vnmsub.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vnmsub.vv\t%0,%3,%4%p1"
+ vmv%m2r.v\t%0,%2\;vnmsub.vv\t%0,%3,%4%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "2")
@@ -5707,9 +5707,9 @@ (define_insn "*pred_nmsac<mode>"
"TARGET_VECTOR"
"@
vnmsac.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vnmsac.vv\t%0,%2,%3%p1
+ vmv%m4r.v\t%0,%4\;vnmsac.vv\t%0,%2,%3%p1
vnmsac.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vnmsac.vv\t%0,%2,%3%p1"
+ vmv%m4r.v\t%0,%4\;vnmsac.vv\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -5760,9 +5760,9 @@ (define_insn "*pred_nmsub<mode>_scalar"
"TARGET_VECTOR"
"@
vnmsub.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1
+ vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1
vnmsub.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1"
+ vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "3")
@@ -5792,9 +5792,9 @@ (define_insn "*pred_nmsac<mode>_scalar"
"TARGET_VECTOR"
"@
vnmsac.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1
+ vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1
vnmsac.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1"
+ vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -5860,9 +5860,9 @@ (define_insn "*pred_nmsub<mode>_extended_scalar"
"TARGET_VECTOR && !TARGET_64BIT"
"@
vnmsub.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1
+ vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1
vnmsub.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1"
+ vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "3")
@@ -5893,9 +5893,9 @@ (define_insn "*pred_nmsac<mode>_extended_scalar"
"TARGET_VECTOR && !TARGET_64BIT"
"@
vnmsac.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1
+ vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1
vnmsac.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1"
+ vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -6555,10 +6555,10 @@ (define_insn "*pred_mul_<optab><mode>_undef"
"@
vf<madd_msub>.vv\t%0,%4,%5%p1
vf<macc_msac>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%3\;vf<madd_msub>.vv\t%0,%4,%5%p1
+ vmv%m3r.v\t%0,%3\;vf<madd_msub>.vv\t%0,%4,%5%p1
vf<madd_msub>.vv\t%0,%4,%5%p1
vf<macc_msac>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%3\;vf<madd_msub>.vv\t%0,%4,%5%p1"
+ vmv%m3r.v\t%0,%3\;vf<madd_msub>.vv\t%0,%4,%5%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set (attr "frm_mode")
@@ -6586,9 +6586,9 @@ (define_insn "*pred_<madd_msub><mode>"
"TARGET_VECTOR"
"@
vf<madd_msub>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vf<madd_msub>.vv\t%0,%3,%4%p1
+ vmv%m2r.v\t%0,%2\;vf<madd_msub>.vv\t%0,%3,%4%p1
vf<madd_msub>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vf<madd_msub>.vv\t%0,%3,%4%p1"
+ vmv%m2r.v\t%0,%2\;vf<madd_msub>.vv\t%0,%3,%4%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "2")
@@ -6621,9 +6621,9 @@ (define_insn "*pred_<macc_msac><mode>"
"TARGET_VECTOR"
"@
vf<macc_msac>.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<macc_msac>.vv\t%0,%2,%3%p1
+ vmv%m4r.v\t%0,%4\;vf<macc_msac>.vv\t%0,%2,%3%p1
vf<macc_msac>.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<macc_msac>.vv\t%0,%2,%3%p1"
+ vmv%m4r.v\t%0,%4\;vf<macc_msac>.vv\t%0,%2,%3%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -6680,9 +6680,9 @@ (define_insn "*pred_<madd_msub><mode>_scalar"
"TARGET_VECTOR"
"@
vf<madd_msub>.vf\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vf<madd_msub>.vf\t%0,%2,%4%p1
+ vmv%m3r.v\t%0,%3\;vf<madd_msub>.vf\t%0,%2,%4%p1
vf<madd_msub>.vf\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vf<madd_msub>.vf\t%0,%2,%4%p1"
+ vmv%m3r.v\t%0,%3\;vf<madd_msub>.vf\t%0,%2,%4%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "3")
@@ -6716,9 +6716,9 @@ (define_insn "*pred_<macc_msac><mode>_scalar"
"TARGET_VECTOR"
"@
vf<macc_msac>.vf\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<macc_msac>.vf\t%0,%2,%3%p1
+ vmv%m4r.v\t%0,%4\;vf<macc_msac>.vf\t%0,%2,%3%p1
vf<macc_msac>.vf\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<macc_msac>.vf\t%0,%2,%3%p1"
+ vmv%m4r.v\t%0,%4\;vf<macc_msac>.vf\t%0,%2,%3%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -6778,10 +6778,10 @@ (define_insn "*pred_mul_neg_<optab><mode>_undef"
"@
vf<nmsub_nmadd>.vv\t%0,%4,%5%p1
vf<nmsac_nmacc>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%3\;vf<nmsub_nmadd>.vv\t%0,%4,%5%p1
+ vmv%m3r.v\t%0,%3\;vf<nmsub_nmadd>.vv\t%0,%4,%5%p1
vf<nmsub_nmadd>.vv\t%0,%4,%5%p1
vf<nmsac_nmacc>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%3\;vf<nmsub_nmadd>.vv\t%0,%4,%5%p1"
+ vmv%m3r.v\t%0,%3\;vf<nmsub_nmadd>.vv\t%0,%4,%5%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set (attr "frm_mode")
@@ -6810,9 +6810,9 @@ (define_insn "*pred_<nmsub_nmadd><mode>"
"TARGET_VECTOR"
"@
vf<nmsub_nmadd>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vf<nmsub_nmadd>.vv\t%0,%3,%4%p1
+ vmv%m2r.v\t%0,%2\;vf<nmsub_nmadd>.vv\t%0,%3,%4%p1
vf<nmsub_nmadd>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vf<nmsub_nmadd>.vv\t%0,%3,%4%p1"
+ vmv%m2r.v\t%0,%2\;vf<nmsub_nmadd>.vv\t%0,%3,%4%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "2")
@@ -6846,9 +6846,9 @@ (define_insn "*pred_<nmsac_nmacc><mode>"
"TARGET_VECTOR"
"@
vf<nmsac_nmacc>.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<nmsac_nmacc>.vv\t%0,%2,%3%p1
+ vmv%m4r.v\t%0,%4\;vf<nmsac_nmacc>.vv\t%0,%2,%3%p1
vf<nmsac_nmacc>.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<nmsac_nmacc>.vv\t%0,%2,%3%p1"
+ vmv%m4r.v\t%0,%4\;vf<nmsac_nmacc>.vv\t%0,%2,%3%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -6907,9 +6907,9 @@ (define_insn "*pred_<nmsub_nmadd><mode>_scalar"
"TARGET_VECTOR"
"@
vf<nmsub_nmadd>.vf\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vf<nmsub_nmadd>.vf\t%0,%2,%4%p1
+ vmv%m3r.v\t%0,%3\;vf<nmsub_nmadd>.vf\t%0,%2,%4%p1
vf<nmsub_nmadd>.vf\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vf<nmsub_nmadd>.vf\t%0,%2,%4%p1"
+ vmv%m3r.v\t%0,%3\;vf<nmsub_nmadd>.vf\t%0,%2,%4%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "3")
@@ -6944,9 +6944,9 @@ (define_insn "*pred_<nmsac_nmacc><mode>_scalar"
"TARGET_VECTOR"
"@
vf<nmsac_nmacc>.vf\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<nmsac_nmacc>.vf\t%0,%2,%3%p1
+ vmv%m4r.v\t%0,%4\;vf<nmsac_nmacc>.vf\t%0,%2,%3%p1
vf<nmsac_nmacc>.vf\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<nmsac_nmacc>.vf\t%0,%2,%3%p1"
+ vmv%m4r.v\t%0,%4\;vf<nmsac_nmacc>.vf\t%0,%2,%3%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr114200.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr114200.c
new file mode 100644
index 00000000000..23e37ca9b9f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr114200.c
@@ -0,0 +1,18 @@
+/* { dg-do run { target { riscv_v } } } */
+/* { dg-options { -march=rv64gcv -mabi=lp64d -O3 -fwrapv } } */
+
+short a, e = 1;
+_Bool b, d;
+short c[300];
+
+int main() {
+ for (int f = 0; f < 19; f++) {
+ for (int g = 0; g < 14; g++)
+ for (int h = 0; h < 10; h++)
+ a += c[g] + e;
+ b += d;
+ }
+
+ if (a != 2660)
+ __builtin_abort ();
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr114202.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr114202.c
new file mode 100644
index 00000000000..f743b08b7af
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr114202.c
@@ -0,0 +1,20 @@
+/* { dg-do run { target { riscv_v } } } */
+/* { dg-options { -march=rv64gcv -mabi=lp64d -O3 -fwrapv } } */
+
+signed char a = 0, d = 0;
+_Bool b;
+signed char c[324];
+int e;
+
+int main() {
+ c[63] = 50;
+ for (int f = 0; f < 9; f++) {
+ for (unsigned g = 0; g < 12; g++)
+ for (char h = 0; h < 8; h++)
+ e = a += c[g * 9];
+ b = e ? d : 0;
+ }
+
+ if (a != 16)
+ __builtin_abort ();
+}
--
2.43.2
@@ -5351,10 +5351,10 @@ (define_insn "*pred_mul_plus<mode>_undef"
"@
vmadd.vv\t%0,%4,%5%p1
vmacc.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%4\;vmacc.vv\t%0,%3,%4%p1
+ vmv%m4r.v\t%0,%4\;vmacc.vv\t%0,%3,%4%p1
vmadd.vv\t%0,%4,%5%p1
vmacc.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%5\;vmacc.vv\t%0,%3,%4%p1"
+ vmv%m5r.v\t%0,%5\;vmacc.vv\t%0,%3,%4%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")])
@@ -5378,9 +5378,9 @@ (define_insn "*pred_madd<mode>"
"TARGET_VECTOR"
"@
vmadd.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vmadd.vv\t%0,%3,%4%p1
+ vmv%m2r.v\t%0,%2\;vmadd.vv\t%0,%3,%4%p1
vmadd.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vmadd.vv\t%0,%3,%4%p1"
+ vmv%m2r.v\t%0,%2\;vmadd.vv\t%0,%3,%4%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "2")
@@ -5409,9 +5409,9 @@ (define_insn "*pred_macc<mode>"
"TARGET_VECTOR"
"@
vmacc.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vmacc.vv\t%0,%2,%3%p1
+ vmv%m4r.v\t%0,%4\;vmacc.vv\t%0,%2,%3%p1
vmacc.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vmacc.vv\t%0,%2,%3%p1"
+ vmv%m4r.v\t%0,%4\;vmacc.vv\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -5462,9 +5462,9 @@ (define_insn "*pred_madd<mode>_scalar"
"TARGET_VECTOR"
"@
vmadd.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1
+ vmv%m3r.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1
vmadd.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1"
+ vmv%m3r.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "3")
@@ -5494,9 +5494,9 @@ (define_insn "*pred_macc<mode>_scalar"
"TARGET_VECTOR"
"@
vmacc.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1
+ vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1
vmacc.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1"
+ vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -5562,9 +5562,9 @@ (define_insn "*pred_madd<mode>_extended_scalar"
"TARGET_VECTOR && !TARGET_64BIT"
"@
vmadd.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1
+ vmv%m3r.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1
vmadd.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1
+ vmv%m3r.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "3")
@@ -5595,9 +5595,9 @@ (define_insn "*pred_macc<mode>_extended_scalar"
"TARGET_VECTOR && !TARGET_64BIT"
"@
vmacc.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1
+ vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1
vmacc.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1"
+ vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -5649,10 +5649,10 @@ (define_insn "*pred_minus_mul<mode>_undef"
"@
vnmsub.vv\t%0,%4,%5%p1
vnmsac.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%3\;vnmsub.vv\t%0,%4,%5%p1
+ vmv%m3r.v\t%0,%3\;vnmsub.vv\t%0,%4,%5%p1
vnmsub.vv\t%0,%4,%5%p1
vnmsac.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%3\;vnmsub.vv\t%0,%4,%5%p1"
+ vmv%m3r.v\t%0,%3\;vnmsub.vv\t%0,%4,%5%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")])
@@ -5676,9 +5676,9 @@ (define_insn "*pred_nmsub<mode>"
"TARGET_VECTOR"
"@
vnmsub.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vnmsub.vv\t%0,%3,%4%p1
+ vmv%m2r.v\t%0,%2\;vnmsub.vv\t%0,%3,%4%p1
vnmsub.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vnmsub.vv\t%0,%3,%4%p1"
+ vmv%m2r.v\t%0,%2\;vnmsub.vv\t%0,%3,%4%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "2")
@@ -5707,9 +5707,9 @@ (define_insn "*pred_nmsac<mode>"
"TARGET_VECTOR"
"@
vnmsac.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vnmsac.vv\t%0,%2,%3%p1
+ vmv%m4r.v\t%0,%4\;vnmsac.vv\t%0,%2,%3%p1
vnmsac.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vnmsac.vv\t%0,%2,%3%p1"
+ vmv%m4r.v\t%0,%4\;vnmsac.vv\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -5760,9 +5760,9 @@ (define_insn "*pred_nmsub<mode>_scalar"
"TARGET_VECTOR"
"@
vnmsub.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1
+ vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1
vnmsub.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1"
+ vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "3")
@@ -5792,9 +5792,9 @@ (define_insn "*pred_nmsac<mode>_scalar"
"TARGET_VECTOR"
"@
vnmsac.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1
+ vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1
vnmsac.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1"
+ vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -5860,9 +5860,9 @@ (define_insn "*pred_nmsub<mode>_extended_scalar"
"TARGET_VECTOR && !TARGET_64BIT"
"@
vnmsub.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1
+ vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1
vnmsub.vx\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1"
+ vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "3")
@@ -5893,9 +5893,9 @@ (define_insn "*pred_nmsac<mode>_extended_scalar"
"TARGET_VECTOR && !TARGET_64BIT"
"@
vnmsac.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1
+ vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1
vnmsac.vx\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1"
+ vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1"
[(set_attr "type" "vimuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -6555,10 +6555,10 @@ (define_insn "*pred_mul_<optab><mode>_undef"
"@
vf<madd_msub>.vv\t%0,%4,%5%p1
vf<macc_msac>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%3\;vf<madd_msub>.vv\t%0,%4,%5%p1
+ vmv%m3r.v\t%0,%3\;vf<madd_msub>.vv\t%0,%4,%5%p1
vf<madd_msub>.vv\t%0,%4,%5%p1
vf<macc_msac>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%3\;vf<madd_msub>.vv\t%0,%4,%5%p1"
+ vmv%m3r.v\t%0,%3\;vf<madd_msub>.vv\t%0,%4,%5%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set (attr "frm_mode")
@@ -6586,9 +6586,9 @@ (define_insn "*pred_<madd_msub><mode>"
"TARGET_VECTOR"
"@
vf<madd_msub>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vf<madd_msub>.vv\t%0,%3,%4%p1
+ vmv%m2r.v\t%0,%2\;vf<madd_msub>.vv\t%0,%3,%4%p1
vf<madd_msub>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vf<madd_msub>.vv\t%0,%3,%4%p1"
+ vmv%m2r.v\t%0,%2\;vf<madd_msub>.vv\t%0,%3,%4%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "2")
@@ -6621,9 +6621,9 @@ (define_insn "*pred_<macc_msac><mode>"
"TARGET_VECTOR"
"@
vf<macc_msac>.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<macc_msac>.vv\t%0,%2,%3%p1
+ vmv%m4r.v\t%0,%4\;vf<macc_msac>.vv\t%0,%2,%3%p1
vf<macc_msac>.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<macc_msac>.vv\t%0,%2,%3%p1"
+ vmv%m4r.v\t%0,%4\;vf<macc_msac>.vv\t%0,%2,%3%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -6680,9 +6680,9 @@ (define_insn "*pred_<madd_msub><mode>_scalar"
"TARGET_VECTOR"
"@
vf<madd_msub>.vf\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vf<madd_msub>.vf\t%0,%2,%4%p1
+ vmv%m3r.v\t%0,%3\;vf<madd_msub>.vf\t%0,%2,%4%p1
vf<madd_msub>.vf\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vf<madd_msub>.vf\t%0,%2,%4%p1"
+ vmv%m3r.v\t%0,%3\;vf<madd_msub>.vf\t%0,%2,%4%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "3")
@@ -6716,9 +6716,9 @@ (define_insn "*pred_<macc_msac><mode>_scalar"
"TARGET_VECTOR"
"@
vf<macc_msac>.vf\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<macc_msac>.vf\t%0,%2,%3%p1
+ vmv%m4r.v\t%0,%4\;vf<macc_msac>.vf\t%0,%2,%3%p1
vf<macc_msac>.vf\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<macc_msac>.vf\t%0,%2,%3%p1"
+ vmv%m4r.v\t%0,%4\;vf<macc_msac>.vf\t%0,%2,%3%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -6778,10 +6778,10 @@ (define_insn "*pred_mul_neg_<optab><mode>_undef"
"@
vf<nmsub_nmadd>.vv\t%0,%4,%5%p1
vf<nmsac_nmacc>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%3\;vf<nmsub_nmadd>.vv\t%0,%4,%5%p1
+ vmv%m3r.v\t%0,%3\;vf<nmsub_nmadd>.vv\t%0,%4,%5%p1
vf<nmsub_nmadd>.vv\t%0,%4,%5%p1
vf<nmsac_nmacc>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%3\;vf<nmsub_nmadd>.vv\t%0,%4,%5%p1"
+ vmv%m3r.v\t%0,%3\;vf<nmsub_nmadd>.vv\t%0,%4,%5%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set (attr "frm_mode")
@@ -6810,9 +6810,9 @@ (define_insn "*pred_<nmsub_nmadd><mode>"
"TARGET_VECTOR"
"@
vf<nmsub_nmadd>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vf<nmsub_nmadd>.vv\t%0,%3,%4%p1
+ vmv%m2r.v\t%0,%2\;vf<nmsub_nmadd>.vv\t%0,%3,%4%p1
vf<nmsub_nmadd>.vv\t%0,%3,%4%p1
- vmv.v.v\t%0,%2\;vf<nmsub_nmadd>.vv\t%0,%3,%4%p1"
+ vmv%m2r.v\t%0,%2\;vf<nmsub_nmadd>.vv\t%0,%3,%4%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "2")
@@ -6846,9 +6846,9 @@ (define_insn "*pred_<nmsac_nmacc><mode>"
"TARGET_VECTOR"
"@
vf<nmsac_nmacc>.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<nmsac_nmacc>.vv\t%0,%2,%3%p1
+ vmv%m4r.v\t%0,%4\;vf<nmsac_nmacc>.vv\t%0,%2,%3%p1
vf<nmsac_nmacc>.vv\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<nmsac_nmacc>.vv\t%0,%2,%3%p1"
+ vmv%m4r.v\t%0,%4\;vf<nmsac_nmacc>.vv\t%0,%2,%3%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
@@ -6907,9 +6907,9 @@ (define_insn "*pred_<nmsub_nmadd><mode>_scalar"
"TARGET_VECTOR"
"@
vf<nmsub_nmadd>.vf\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vf<nmsub_nmadd>.vf\t%0,%2,%4%p1
+ vmv%m3r.v\t%0,%3\;vf<nmsub_nmadd>.vf\t%0,%2,%4%p1
vf<nmsub_nmadd>.vf\t%0,%2,%4%p1
- vmv.v.v\t%0,%3\;vf<nmsub_nmadd>.vf\t%0,%2,%4%p1"
+ vmv%m3r.v\t%0,%3\;vf<nmsub_nmadd>.vf\t%0,%2,%4%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "3")
@@ -6944,9 +6944,9 @@ (define_insn "*pred_<nmsac_nmacc><mode>_scalar"
"TARGET_VECTOR"
"@
vf<nmsac_nmacc>.vf\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<nmsac_nmacc>.vf\t%0,%2,%3%p1
+ vmv%m4r.v\t%0,%4\;vf<nmsac_nmacc>.vf\t%0,%2,%3%p1
vf<nmsac_nmacc>.vf\t%0,%2,%3%p1
- vmv.v.v\t%0,%4\;vf<nmsac_nmacc>.vf\t%0,%2,%3%p1"
+ vmv%m4r.v\t%0,%4\;vf<nmsac_nmacc>.vf\t%0,%2,%3%p1"
[(set_attr "type" "vfmuladd")
(set_attr "mode" "<MODE>")
(set_attr "merge_op_idx" "4")
new file mode 100644
@@ -0,0 +1,18 @@
+/* { dg-do run { target { riscv_v } } } */
+/* { dg-options { -march=rv64gcv -mabi=lp64d -O3 -fwrapv } } */
+
+short a, e = 1;
+_Bool b, d;
+short c[300];
+
+int main() {
+ for (int f = 0; f < 19; f++) {
+ for (int g = 0; g < 14; g++)
+ for (int h = 0; h < 10; h++)
+ a += c[g] + e;
+ b += d;
+ }
+
+ if (a != 2660)
+ __builtin_abort ();
+}
new file mode 100644
@@ -0,0 +1,20 @@
+/* { dg-do run { target { riscv_v } } } */
+/* { dg-options { -march=rv64gcv -mabi=lp64d -O3 -fwrapv } } */
+
+signed char a = 0, d = 0;
+_Bool b;
+signed char c[324];
+int e;
+
+int main() {
+ c[63] = 50;
+ for (int f = 0; f < 9; f++) {
+ for (unsigned g = 0; g < 12; g++)
+ for (char h = 0; h < 8; h++)
+ e = a += c[g * 9];
+ b = e ? d : 0;
+ }
+
+ if (a != 16)
+ __builtin_abort ();
+}