gdb/testsuite: fix gdb.reverse/i386-avx-reverse.exp with clang

Message ID 20241030193123.3086112-1-guinevere@redhat.com
State New
Series gdb/testsuite: fix gdb.reverse/i386-avx-reverse.exp with clang

Checks

Context Check Description
linaro-tcwg-bot/tcwg_gdb_build--master-aarch64 success Build passed
linaro-tcwg-bot/tcwg_gdb_build--master-arm success Build passed
linaro-tcwg-bot/tcwg_gdb_check--master-arm success Test passed
linaro-tcwg-bot/tcwg_gdb_check--master-aarch64 success Test passed

Commit Message

Guinevere Larsen Oct. 30, 2024, 7:31 p.m. UTC
The test gdb.reverse/i386-avx-reverse.exp was changed by the recent
commit:

    commit 5bf288d5a88ab6d3fa9bd7bd070e624afd264dc6
    Author: Guinevere Larsen <guinevere@redhat.com>
    Date:   Fri Jul 26 17:31:14 2024 -0300

    gdb/record: support AVX instructions VMOVDQ(U|A) when recording

In that commit I added a few vmovdqa instructions that move data to and
from memory addresses. Because my local gcc builds always happened to
produce 32-byte-aligned buffers, I assumed this would always work, but
clang (and maybe other compilers) may not align the buffers the same
way, which makes vmovdqa segfault and the test fail spectacularly.

This commit fixes that by adding an if condition that checks for
alignment before using vmovdqa; if the memory is unaligned, vmovdqu is
used instead, which - at the time of this commit - exercises the same
code path in the recorder.
---
 gdb/testsuite/gdb.reverse/i386-avx-reverse.c  | 23 ++++++++++++++++---
 .../gdb.reverse/i386-avx-reverse.exp          | 10 ++++++--
 2 files changed, 28 insertions(+), 5 deletions(-)
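
For context, vmovdqa with a 256-bit (ymm) operand faults unless its
memory operand is 32-byte aligned, while vmovdqu accepts any alignment.
Below is a minimal standalone illustration of the alignment check the
patch adds; the buffer names are made up for this example, and the
aligned attribute shown is an alternative way to force alignment that
the patch does not rely on:

#include <stdint.h>
#include <stdio.h>

/* 32-byte alignment check, same idea as the patch's aligned() macro.  */
#define ALIGNED_32(p) (((uintptr_t) (const void *) (p)) % 32 == 0)

/* Both gcc and clang also support forcing alignment explicitly.  */
char forced[32] __attribute__ ((aligned (32)));
/* Without the attribute, 32-byte alignment is not guaranteed.  */
char unforced[32];

int
main (void)
{
  printf ("forced:   %saligned\n", ALIGNED_32 (forced) ? "" : "un");
  printf ("unforced: %saligned\n", ALIGNED_32 (unforced) ? "" : "un");
  return 0;
}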
  

Comments

Tom de Vries Oct. 30, 2024, 8:03 p.m. UTC | #1
On 10/30/24 20:31, Guinevere Larsen wrote:
> The test gdb.reverse/i386-avx-reverse.exp was changed by the recent
> commit:
> 
>      commit 5bf288d5a88ab6d3fa9bd7bd070e624afd264dc6
>      Author: Guinevere Larsen <guinevere@redhat.com>
>      Date:   Fri Jul 26 17:31:14 2024 -0300
> 
>      gdb/record: support AVX instructions VMOVDQ(U|A) when recording
> 
> In that commit I added a few vmovdqa instructions that move data to and
> from memory addresses. Because my local gcc builds always happened to
> produce 32-byte-aligned buffers, I assumed this would always work, but
> clang (and maybe other compilers) may not align the buffers the same
> way, which makes vmovdqa segfault and the test fail spectacularly.
>
> This commit fixes that by adding an if condition that checks for
> alignment before using vmovdqa; if the memory is unaligned, vmovdqu is
> used instead, which - at the time of this commit - exercises the same
> code path in the recorder.

Hi Gwen,

I ran into this sort of thing before, and came up with 
precise_aligned_alloc in gdb/testsuite/lib/precise-aligned-alloc.c.

Consider using this to exercise both vmovdqa and vmovdqu, rather than 
one or the other.
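
For illustration, a rough sketch of what that could look like; the
helper's signature below is assumed from memory, so check
gdb/testsuite/lib/precise-aligned-alloc.c before copying, and
vmov_heap_test and the buffer names are made up for the example:

#include <stdlib.h>

/* Assumed interface of the helper in
   gdb/testsuite/lib/precise-aligned-alloc.c: returns SIZE bytes
   aligned to ALIGNMENT but deliberately not to 2 * ALIGNMENT, and
   stores the pointer to later pass to free in *FREE_POINTER.  In the
   testsuite the .c file would be pulled in directly rather than
   declared like this.  */
void *precise_aligned_alloc (size_t alignment, size_t size,
                             void **free_pointer);

/* Hypothetical test function; build with -mavx on x86-64.  */
void
vmov_heap_test (void)
{
  void *free_a, *free_u;

  /* Aligned to exactly 32 bytes, so vmovdqa is guaranteed to work.  */
  char *buf_a = precise_aligned_alloc (32, 32, &free_a);
  /* Aligned to 16 bytes but, by construction, not to 32, so vmovdqa
     would fault and only vmovdqu is safe.  */
  char *buf_u = precise_aligned_alloc (16, 32, &free_u);

  asm volatile ("vmovdqa %0, %%ymm15" : : "m" (*(char (*)[32]) buf_a));
  asm volatile ("vmovdqu %0, %%ymm0" : : "m" (*(char (*)[32]) buf_u));

  free (free_a);
  free (free_u);
}

This would exercise both instructions deterministically, independent of
where the compiler happens to place the global buffers.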

Thanks,
- Tom

> ---
>   gdb/testsuite/gdb.reverse/i386-avx-reverse.c  | 23 ++++++++++++++++---
>   .../gdb.reverse/i386-avx-reverse.exp          | 10 ++++++--
>   2 files changed, 28 insertions(+), 5 deletions(-)
> 
> diff --git a/gdb/testsuite/gdb.reverse/i386-avx-reverse.c b/gdb/testsuite/gdb.reverse/i386-avx-reverse.c
> index b36de10ec6f..9ddcf32c011 100644
> --- a/gdb/testsuite/gdb.reverse/i386-avx-reverse.c
> +++ b/gdb/testsuite/gdb.reverse/i386-avx-reverse.c
> @@ -18,6 +18,11 @@
>   /* Architecture tests for intel i386 platform.  */
>   
>   #include <stdlib.h>
> +#include <stdint.h>
> +
> +/* Check whether the pointer is aligned to a 32 byte boundary.  This
> +   is used to verify that vmovdqa won't segfault.  */
> +#define aligned(pointer) (((uintptr_t)(const void *)(pointer)) % 32 == 0)
>   
>   char global_buf0[] = {0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
>   		      0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
> @@ -91,9 +96,21 @@ vmov_test ()
>     asm volatile ("vmovdqu %%ymm0, %0": "=m"(buf1));
>   
>     /* Operations based on global buffers.  */
> -  /* Global buffers seem to always be aligned, lets sanity check vmovdqa.  */
> -  asm volatile ("vmovdqa %0, %%ymm15": : "m"(global_buf0));
> -  asm volatile ("vmovdqa %%ymm15, %0": "=m"(global_buf1));
> +  /* The instruction vmovdqa requires either 2 registers or a register
> +     and a memory operand aligned to a 32-byte boundary.  Since recording
> +     this instruction shares the code path of vmovdqu, technically both
> +     branches of the if condition test the same code, but should things
> +     change in the future, this test will exercise everything necessary.  */
> +  if (aligned(&global_buf0) && aligned(&global_buf1))
> +    {
> +      asm volatile ("vmovdqa %0, %%ymm15": : "m"(global_buf0));
> +      asm volatile ("vmovdqa %%ymm15, %0": "=m"(global_buf1));
> +    }
> +  else
> +    {
> +      asm volatile ("vmovdqu %0, %%ymm15": : "m"(global_buf0));
> +      asm volatile ("vmovdqu %%ymm15, %0": "=m"(global_buf1));
> +    }
>     asm volatile ("vmovdqu %0, %%ymm0": : "m"(global_buf0));
>     asm volatile ("vmovdqu %%ymm0, %0": "=m"(global_buf1));
>   
> diff --git a/gdb/testsuite/gdb.reverse/i386-avx-reverse.exp b/gdb/testsuite/gdb.reverse/i386-avx-reverse.exp
> index 4aefbcdbab3..f22f52e4f1a 100644
> --- a/gdb/testsuite/gdb.reverse/i386-avx-reverse.exp
> +++ b/gdb/testsuite/gdb.reverse/i386-avx-reverse.exp
> @@ -157,8 +157,14 @@ if {[record_full_function "vmov"] == true} {
>       test_one_register "vmovdqu" "ymm0" \
>   	"0x3f3e3d3c3b3a39383736353433323130, 0x3f3e3d3c3b3a39383736353433323130" \
>   	"global buffer: "
> -    test_one_memory "vmovdqa" "global_buf1" "0x0 .repeats 32 times"
> -    test_one_register "vmovdqa" "ymm15" "0x0, 0x0"
> +    # The next 2 instructions may be vmovdqa or vmovdqu, depending on whether
> +    # the global pointers ended up aligned to a 32 byte boundary.  While
> +    # ideally we'd like to test vmovdqa, we can't guarantee that alignment.
> +    test_one_memory "vmovdq" "global_buf1" "0x0 .repeats 32 times"
> +    test_one_register "vmovdq" "ymm15" "0x0, 0x0"
> +
> +    # Step over the if clause that checks for memory alignment.
> +    gdb_test "reverse-next" ".*"
>   
>       test_one_memory "vmovdqu" "buf1" "0x0 .repeats 32 times"
>       test_one_register "vmovdqu" "ymm0" "0x2726252423222120, 0x0" "local buffer: "
  
Guinevere Larsen Oct. 31, 2024, 6:12 p.m. UTC | #2
On 10/30/24 5:03 PM, Tom de Vries wrote:
> On 10/30/24 20:31, Guinevere Larsen wrote:
>> The test gdb.reverse/i386-avx-reverse.exp was changed by the recent
>> commit:
>>
>>      commit 5bf288d5a88ab6d3fa9bd7bd070e624afd264dc6
>>      Author: Guinevere Larsen <guinevere@redhat.com>
>>      Date:   Fri Jul 26 17:31:14 2024 -0300
>>
>>      gdb/record: support AVX instructions VMOVDQ(U|A) when recording
>>
>> In that commit I added a few vmovdqa instructions that move data to and
>> from memory addresses. Because my local gcc builds always happened to
>> produce 32-byte-aligned buffers, I assumed this would always work, but
>> clang (and maybe other compilers) may not align the buffers the same
>> way, which makes vmovdqa segfault and the test fail spectacularly.
>>
>> This commit fixes that by adding an if condition that checks for
>> alignment before using vmovdqa; if the memory is unaligned, vmovdqu is
>> used instead, which - at the time of this commit - exercises the same
>> code path in the recorder.
>
> Hi Gwen,
>
> I ran into this sort of thing before, and came up with 
> precise_aligned_alloc in gdb/testsuite/lib/precise-aligned-alloc.c.
>
> Consider using this to exercise both vmovdqa and vmovdqu, rather than 
> one or the other.
This is really useful, I didn't know it existed! I'll send a v2 shortly
using it.
  

Patch

diff --git a/gdb/testsuite/gdb.reverse/i386-avx-reverse.c b/gdb/testsuite/gdb.reverse/i386-avx-reverse.c
index b36de10ec6f..9ddcf32c011 100644
--- a/gdb/testsuite/gdb.reverse/i386-avx-reverse.c
+++ b/gdb/testsuite/gdb.reverse/i386-avx-reverse.c
@@ -18,6 +18,11 @@ 
 /* Architecture tests for intel i386 platform.  */
 
 #include <stdlib.h>
+#include <stdint.h>
+
+/* Check whether the pointer is aligned to a 32 byte boundary.  This
+   is used to verify that vmovdqa won't segfault.  */
+#define aligned(pointer) (((uintptr_t)(const void *)(pointer)) % 32 == 0)
 
 char global_buf0[] = {0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
 		      0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
@@ -91,9 +96,21 @@  vmov_test ()
   asm volatile ("vmovdqu %%ymm0, %0": "=m"(buf1));
 
   /* Operations based on global buffers.  */
-  /* Global buffers seem to always be aligned, lets sanity check vmovdqa.  */
-  asm volatile ("vmovdqa %0, %%ymm15": : "m"(global_buf0));
-  asm volatile ("vmovdqa %%ymm15, %0": "=m"(global_buf1));
+  /* The instruction vmovdqa requires either 2 registers or a register
+     and a memory operand aligned to a 32-byte boundary.  Since recording
+     this instruction shares the code path of vmovdqu, technically both
+     branches of the if condition test the same code, but should things
+     change in the future, this test will exercise everything necessary.  */
+  if (aligned(&global_buf0) && aligned(&global_buf1))
+    {
+      asm volatile ("vmovdqa %0, %%ymm15": : "m"(global_buf0));
+      asm volatile ("vmovdqa %%ymm15, %0": "=m"(global_buf1));
+    }
+  else
+    {
+      asm volatile ("vmovdqu %0, %%ymm15": : "m"(global_buf0));
+      asm volatile ("vmovdqu %%ymm15, %0": "=m"(global_buf1));
+    }
   asm volatile ("vmovdqu %0, %%ymm0": : "m"(global_buf0));
   asm volatile ("vmovdqu %%ymm0, %0": "=m"(global_buf1));
 
diff --git a/gdb/testsuite/gdb.reverse/i386-avx-reverse.exp b/gdb/testsuite/gdb.reverse/i386-avx-reverse.exp
index 4aefbcdbab3..f22f52e4f1a 100644
--- a/gdb/testsuite/gdb.reverse/i386-avx-reverse.exp
+++ b/gdb/testsuite/gdb.reverse/i386-avx-reverse.exp
@@ -157,8 +157,14 @@  if {[record_full_function "vmov"] == true} {
     test_one_register "vmovdqu" "ymm0" \
 	"0x3f3e3d3c3b3a39383736353433323130, 0x3f3e3d3c3b3a39383736353433323130" \
 	"global buffer: "
-    test_one_memory "vmovdqa" "global_buf1" "0x0 .repeats 32 times"
-    test_one_register "vmovdqa" "ymm15" "0x0, 0x0"
+    # The next 2 instructions may be vmovdqa or vmovdqu, depending on whether
+    # the global pointers ended up aligned to a 32 byte boundary.  While
+    # ideally we'd like to test vmovdqa, we can't guarantee that alignment.
+    test_one_memory "vmovdq" "global_buf1" "0x0 .repeats 32 times"
+    test_one_register "vmovdq" "ymm15" "0x0, 0x0"
+
+    # Step over the if clause that checks for memory alignment.
+    gdb_test "reverse-next" ".*"
 
     test_one_memory "vmovdqu" "buf1" "0x0 .repeats 32 times"
     test_one_register "vmovdqu" "ymm0" "0x2726252423222120, 0x0" "local buffer: "