diff mbox

[1/2] Add rdrand/rdseed record/replay tests

Message ID 1485451129-16439-1-git-send-email-lgustavo@codesourcery.com
State New
Headers show

Commit Message

Luis Machado Jan. 26, 2017, 5:18 p.m. UTC
This patch adds rdrand/rdseed record/replay tests.

With an unsupported architecture, this should yield the following testsuite
results:

    === gdb Summary ===

# of expected passes	1
# of untested testcases	  1

gdb/testsuite/ChangeLog:

2017-01-26  Luis Machado  <lgustavo@codesourcery.com>

	* gdb.reverse/insn-reverse.c: Include insn-reverse-x86.c.
	* gdb.reverse/insn-reverse-x86.c: New file.
---
 gdb/testsuite/gdb.reverse/insn-reverse-x86.c | 261 +++++++++++++++++++++++++++
 gdb/testsuite/gdb.reverse/insn-reverse.c     |   2 +
 2 files changed, 263 insertions(+)
 create mode 100644 gdb/testsuite/gdb.reverse/insn-reverse-x86.c

Comments

Luis Machado Jan. 26, 2017, 5:23 p.m. UTC | #1
On 01/26/2017 11:18 AM, Luis Machado wrote:
> This patch adds rdrand/rdseed record/replay tests.
>
> With an unsupported architecture, this should yield the following testsuite
> results:
>
>     === gdb Summary ===
>
> # of expected passes	1
> # of untested testcases	  1
>

To clarify, "unsupported" as in "there are no insn-reverse.exp tests
for this architecture".

If gdb doesn't know how to reverse step/continue over rdrand/rdseed, we 
will get these:

		=== gdb Summary ===

# of expected passes		9
# of unexpected failures	996
Pedro Alves Feb. 1, 2017, 11:09 a.m. UTC | #2
On 01/26/2017 05:18 PM, Luis Machado wrote:

> +/* Fetches information from cpuid and sets flags indicating supported
> +   features.  */

What flags?

> +
> +static unsigned int
> +check_cpu_support (void)
> +{
> +  unsigned int rdrand_mask = (1 << 30);
> +  unsigned int eax, ebx, ecx, edx;
> +
> +  __get_cpuid(1, &eax, &ebx, &ecx, &edx);

Space before parens.

> +  return ((ecx & rdrand_mask) == rdrand_mask);
> +}
> +

Thanks,
Pedro Alves
Pedro Alves Feb. 1, 2017, 11:10 a.m. UTC | #3
On 01/26/2017 05:23 PM, Luis Machado wrote:
> On 01/26/2017 11:18 AM, Luis Machado wrote:
>> This patch adds rdrand/rdseed record/replay tests.
>>
>> With an unsupported architecture, this should yield the following
>> testsuite
>> results:
>>
>>     === gdb Summary ===
>>
>> # of expected passes    1
>> # of untested testcases      1
>>
> 
> To clarify, "unsupported" as in "there are no insn-reverse.exp" tests
> for this architecture.
> 
> If gdb doesn't know how to reverse step/continue over rdrand/rdseed, we
> will get these:
> 
>         === gdb Summary ===
> 
> # of expected passes        9
> # of unexpected failures    996

Please remember to push the GDB fix before the test, or squash them,
in order to avoid introducing known-failures, to help with bisects,
and the buildbots.

Thanks,
Pedro Alves
Luis Machado Feb. 1, 2017, 12:42 p.m. UTC | #4
On 02/01/2017 05:09 AM, Pedro Alves wrote:
> On 01/26/2017 05:18 PM, Luis Machado wrote:
>
>> +/* Fetches information from cpuid and sets flags indicating supported
>> +   features.  */
>
> What flags?
>

This is stale. The function should actually be called 
check_rdrand_support, so that's what it does. In my previous design it 
checked for a number of features, but I've trimmed it down.

>> +
>> +static unsigned int
>> +check_cpu_support (void)
>> +{
>> +  unsigned int rdrand_mask = (1 << 30);
>> +  unsigned int eax, ebx, ecx, edx;
>> +
>> +  __get_cpuid(1, &eax, &ebx, &ecx, &edx);
>

Fixed locally.
diff mbox

Patch

diff --git a/gdb/testsuite/gdb.reverse/insn-reverse-x86.c b/gdb/testsuite/gdb.reverse/insn-reverse-x86.c
new file mode 100644
index 0000000..8d1fbbf
--- /dev/null
+++ b/gdb/testsuite/gdb.reverse/insn-reverse-x86.c
@@ -0,0 +1,261 @@ 
+/* This testcase is part of GDB, the GNU debugger.
+
+   Copyright 2017 Free Software Foundation, Inc.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
+
+#include <cpuid.h>
+#include <stdint.h>
+
+/* Non-zero if the CPU supports rdrand, zero otherwise.  */
+static unsigned int supports_rdrand;
+
+/* Check cpuid leaf 1 for rdrand support.  Return non-zero if the CPU
+   supports the rdrand instruction, zero otherwise.  */
+
+static unsigned int
+check_cpu_support (void)
+{
+  unsigned int rdrand_mask = (1 << 30);
+  unsigned int eax, ebx, ecx, edx;
+
+  __get_cpuid (1, &eax, &ebx, &ecx, &edx);
+  return ((ecx & rdrand_mask) == rdrand_mask);
+}
+
+/* Test rdrand support for various output registers.  */
+
+void
+rdrand (void)
+{
+  /* Get a random number from the rdrand assembly instruction.  */
+  register uint64_t number;
+
+  if (!supports_rdrand)
+    return;
+
+  /* 16-bit random numbers.  */
+  __asm__ volatile ("rdrand %%ax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%bx;" : "=r" (number));
+  __asm__ volatile ("rdrand %%cx;" : "=r" (number));
+  __asm__ volatile ("rdrand %%dx;" : "=r" (number));
+
+  __asm__ volatile ("mov %%di, %%ax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%di;" : "=r" (number));
+  __asm__ volatile ("mov %%ax, %%di;" : "=r" (number));
+
+  __asm__ volatile ("mov %%si, %%ax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%si;" : "=r" (number));
+  __asm__ volatile ("mov %%ax, %%si;" : "=r" (number));
+
+  __asm__ volatile ("mov %%bp, %%ax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%bp;" : "=r" (number));
+  __asm__ volatile ("mov %%ax, %%bp;" : "=r" (number));
+
+  __asm__ volatile ("mov %%sp, %%ax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%sp;" : "=r" (number));
+  __asm__ volatile ("mov %%ax, %%sp;" : "=r" (number));
+
+  __asm__ volatile ("rdrand %%r8w;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r9w;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r10w;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r11w;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r12w;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r13w;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r14w;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r15w;" : "=r" (number));
+
+  /* 32-bit random numbers.  */
+  __asm__ volatile ("rdrand %%eax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%ebx;" : "=r" (number));
+  __asm__ volatile ("rdrand %%ecx;" : "=r" (number));
+  __asm__ volatile ("rdrand %%edx;" : "=r" (number));
+
+  __asm__ volatile ("mov %%rdi, %%rax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%edi;" : "=r" (number));
+  __asm__ volatile ("mov %%rax, %%rdi;" : "=r" (number));
+
+  __asm__ volatile ("mov %%rsi, %%rax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%esi;" : "=r" (number));
+  __asm__ volatile ("mov %%rax, %%rsi;" : "=r" (number));
+
+  __asm__ volatile ("mov %%rbp, %%rax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%ebp;" : "=r" (number));
+  __asm__ volatile ("mov %%rax, %%rbp;" : "=r" (number));
+
+  __asm__ volatile ("mov %%rsp, %%rax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%esp;" : "=r" (number));
+  __asm__ volatile ("mov %%rax, %%rsp;" : "=r" (number));
+
+  __asm__ volatile ("rdrand %%r8d;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r9d;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r10d;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r11d;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r12d;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r13d;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r14d;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r15d;" : "=r" (number));
+
+  /* 64-bit random numbers.  */
+  __asm__ volatile ("rdrand %%rax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%rbx;" : "=r" (number));
+  __asm__ volatile ("rdrand %%rcx;" : "=r" (number));
+  __asm__ volatile ("rdrand %%rdx;" : "=r" (number));
+
+  __asm__ volatile ("mov %%rdi, %%rax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%rdi;" : "=r" (number));
+  __asm__ volatile ("mov %%rax, %%rdi;" : "=r" (number));
+
+  __asm__ volatile ("mov %%rsi, %%rax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%rsi;" : "=r" (number));
+  __asm__ volatile ("mov %%rax, %%rsi;" : "=r" (number));
+
+  __asm__ volatile ("mov %%rbp, %%rax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%rbp;" : "=r" (number));
+  __asm__ volatile ("mov %%rax, %%rbp;" : "=r" (number));
+
+  __asm__ volatile ("mov %%rsp, %%rax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%rsp;" : "=r" (number));
+  __asm__ volatile ("mov %%rax, %%rsp;" : "=r" (number));
+
+  __asm__ volatile ("rdrand %%r8;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r9;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r10;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r11;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r12;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r13;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r14;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r15;" : "=r" (number));
+}
+
+/* Test rdseed support for various output registers.  */
+
+void
+rdseed (void)
+{
+  /* Get a random seed from the rdseed assembly instruction.  */
+  register long seed;
+
+  if (!supports_rdrand)
+    return;
+
+  /* 16-bit random seeds.  */
+  __asm__ volatile ("rdseed %%ax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%bx;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%cx;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%dx;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%di, %%ax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%di;" : "=r" (seed));
+  __asm__ volatile ("mov %%ax, %%di;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%si, %%ax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%si;" : "=r" (seed));
+  __asm__ volatile ("mov %%ax, %%si;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%bp, %%ax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%bp;" : "=r" (seed));
+  __asm__ volatile ("mov %%ax, %%bp;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%sp, %%ax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%sp;" : "=r" (seed));
+  __asm__ volatile ("mov %%ax, %%sp;" : "=r" (seed));
+
+  __asm__ volatile ("rdseed %%r8w;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r9w;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r10w;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r11w;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r12w;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r13w;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r14w;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r15w;" : "=r" (seed));
+
+  /* 32-bit random seeds.  */
+  __asm__ volatile ("rdseed %%eax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%ebx;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%ecx;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%edx;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%rdi, %%rax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%edi;" : "=r" (seed));
+  __asm__ volatile ("mov %%rax, %%rdi;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%rsi, %%rax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%esi;" : "=r" (seed));
+  __asm__ volatile ("mov %%rax, %%rsi;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%rbp, %%rax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%ebp;" : "=r" (seed));
+  __asm__ volatile ("mov %%rax, %%rbp;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%rsp, %%rax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%esp;" : "=r" (seed));
+  __asm__ volatile ("mov %%rax, %%rsp;" : "=r" (seed));
+
+  __asm__ volatile ("rdseed %%r8d;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r9d;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r10d;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r11d;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r12d;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r13d;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r14d;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r15d;" : "=r" (seed));
+
+  /* 64-bit random seeds.  */
+  __asm__ volatile ("rdseed %%rax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%rbx;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%rcx;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%rdx;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%rdi, %%rax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%rdi;" : "=r" (seed));
+  __asm__ volatile ("mov %%rax, %%rdi;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%rsi, %%rax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%rsi;" : "=r" (seed));
+  __asm__ volatile ("mov %%rax, %%rsi;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%rbp, %%rax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%rbp;" : "=r" (seed));
+  __asm__ volatile ("mov %%rax, %%rbp;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%rsp, %%rax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%rsp;" : "=r" (seed));
+  __asm__ volatile ("mov %%rax, %%rsp;" : "=r" (seed));
+
+  __asm__ volatile ("rdseed %%r8;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r9;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r10;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r11;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r12;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r13;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r14;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r15;" : "=r" (seed));
+}
+
+/* Initialize arch-specific bits.  */
+
+static void
+initialize (void)
+{
+  /* Check for rdrand support (also used to gate the rdseed tests).  */
+  supports_rdrand = check_cpu_support ();
+}
+
+/* Functions testing instruction decodings.  GDB will test all of these.  */
+static testcase_ftype testcases[] =
+{
+  rdrand,
+  rdseed
+};
diff --git a/gdb/testsuite/gdb.reverse/insn-reverse.c b/gdb/testsuite/gdb.reverse/insn-reverse.c
index 662a02b..26a22a9 100644
--- a/gdb/testsuite/gdb.reverse/insn-reverse.c
+++ b/gdb/testsuite/gdb.reverse/insn-reverse.c
@@ -24,6 +24,8 @@  typedef void (*testcase_ftype) (void);
 #include "insn-reverse-aarch64.c"
 #elif (defined __arm__)
 #include "insn-reverse-arm.c"
+#elif (defined __x86_64__) || (defined __i386__)
+#include "insn-reverse-x86.c"
 #else
 /* We get here if the current architecture being tested doesn't have any
    record/replay instruction decoding tests implemented.  */