new file mode 100644
@@ -0,0 +1,1544 @@
+/* This file is part of GDB, the GNU debugger.
+
+ Copyright 2023 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#if (__riscv_xlen >= 64)
+#define SKIP_c_flw
+#define SKIP_c_flwsp
+#define SKIP_c_fsw
+#define SKIP_c_fswsp
+#define SKIP_c_jal
+#endif
+
+#if (__riscv_xlen == 32)
+#define SKIP_c_ld
+#define SKIP_c_ldsp
+#define SKIP_c_sd
+#define SKIP_c_sdsp
+#define SKIP_c_addiw
+#define SKIP_c_addw
+#define SKIP_c_subw
+#endif
+
+/* Disable tests that are not implemented in GDB simulator yet. */
+#define DISABLE_GDB_SIM_TESTS
+
+#if defined (DISABLE_GDB_SIM_TESTS)
+#define SKIP_c_flw
+#define SKIP_c_flwsp
+#define SKIP_c_fsw
+#define SKIP_c_fswsp
+#define SKIP_c_fld
+#define SKIP_c_fldsp
+#define SKIP_c_fsd
+#define SKIP_c_fsdsp
+#endif
+
+#define DISABLE_PRINTS
+
+#if defined (DISABLE_PRINTS)
+#define print(...) ;
+#else
+#define print(format, ...) printf (format, __VA_ARGS__)
+#endif
+
+#if (__riscv_xlen >= 64)
+typedef uint64_t riscv_reg_t;
+#elif (__riscv_xlen == 32)
+typedef uint32_t riscv_reg_t;
+#endif
+
+int total_tests = 0;
+int num_fail = 0;
+int num_pass = 0;
+int debug = 0;
+
+void
+i_check (int line, const char *func, uint32_t value1, uint32_t value2)
+{
+ total_tests++;
+ if (value1 != value2)
+ {
+ print (" *** FAIL: %s:%d: (%d) != (%d)\n", func, line, value1, value2);
+ num_fail++;
+ }
+ else
+ {
+ num_pass ++;
+ if (debug)
+ print (" PASS: %s:%d\n", func, line);
+ }
+}
+
+void
+l_check (int line, const char *func, uint64_t value1, uint64_t value2)
+{
+ total_tests++;
+ if (value1 != value2)
+ {
+ print (" *** FAIL: %s:%d: (0x%lx) != (0x%lx)\n", func, line, value1,
+ value2);
+ num_fail++;
+ }
+ else
+ {
+ num_pass++;
+ if (debug)
+ print (" PASS: %s:%d\n", func, line);
+ }
+}
+
+void
+f_check (int line, const char *func, float value1, float value2)
+{
+ total_tests++;
+ if (value1 != value2)
+ {
+ print (" *** FAIL: %s:%d: (%ff) != (%ff)\n", func, line, value1,
+ value2);
+ num_fail++;
+ }
+ else
+ {
+ num_pass++;
+ if (debug)
+ print (" PASS: %s:%d\n", func, line);
+ }
+}
+
+void
+d_check (int line, const char *func, double value1, double value2)
+{
+ total_tests++;
+ if (value1 != value2)
+ {
+ print (" *** FAIL: %s:%d: (%lf) != (%lf)\n", func, line, value1,
+ value2);
+ num_fail++;
+ }
+ else
+ {
+ num_pass++;
+ if (debug)
+ print (" PASS: %s:%d\n", func, line);
+ }
+}
+
+void
+fail (int line, const char *func, const char *msg)
+{
+ total_tests++;
+ print (" *** FAIL: %s:%d: (%s)\n", func, line, msg);
+ num_fail++;
+}
+
+void
+info (const char *str)
+{
+ print ("%s\n", str);
+}
+
+#define I_CHECK(VAL1, VAL2) i_check (__LINE__, __FUNCTION__, (VAL1), (VAL2));
+#define L_CHECK(VAL1, VAL2) l_check (__LINE__, __FUNCTION__, (VAL1), (VAL2));
+#define F_CHECK(VAL1, VAL2) f_check (__LINE__, __FUNCTION__, (VAL1), (VAL2));
+#define D_CHECK(VAL1, VAL2) d_check (__LINE__, __FUNCTION__, (VAL1), (VAL2));
+#define FAIL(STR) fail (__LINE__, __FUNCTION__, (STR));
+
+void
+test_c_lwsp ()
+{
+ volatile uint32_t on_stack[] = { 0x1111, 0x2222, 0x3333, 0x4444 };
+ uint32_t a = 0, offset = 0;
+ riscv_reg_t stack_ptr, var_ptr;
+
+ info ("Testing c.lwsp");
+
+ /* Find offset of on_stack. */
+ asm volatile ("c.mv %0,sp" : "=r" (stack_ptr));
+ var_ptr = (riscv_reg_t) &on_stack[0];
+ offset = var_ptr - stack_ptr;
+ (void) offset;
+
+ if (offset == 0)
+ {
+ asm volatile ("c.lwsp %0,0(sp)" : "=r" (a));
+ I_CHECK (a, 0x1111);
+
+ asm volatile ("c.lwsp %0,4(sp)" : "=r" (a));
+ I_CHECK (a, 0x2222);
+
+ asm volatile ("c.lwsp %0,8(sp)" : "=r" (a));
+ I_CHECK (a, 0x3333);
+
+ asm volatile ("c.lwsp %0,12(sp)" : "=r" (a));
+ I_CHECK (a, 0x4444);
+ }
+ else if (offset == 8)
+ {
+ asm volatile ("c.lwsp %0,8(sp)" : "=r" (a));
+ I_CHECK (a, 0x1111);
+
+ asm volatile ("c.lwsp %0,12(sp)" : "=r" (a));
+ I_CHECK (a, 0x2222);
+
+ asm volatile ("c.lwsp %0,16(sp)" : "=r" (a));
+ I_CHECK (a, 0x3333);
+
+ asm volatile ("c.lwsp %0,20(sp)" : "=r" (a));
+ I_CHECK (a, 0x4444);
+ }
+}
+
+void
+test_c_ldsp ()
+{
+#if defined (SKIP_c_ldsp)
+ info ("--- Disable c.ldsp");
+#else
+ volatile uint64_t on_stack[] = { 0x11112222, 0x33334444 };
+ uint64_t a = 0, offset = 0;
+ riscv_reg_t stack_ptr, var_ptr;
+
+ info ("Testing c.ldsp");
+
+ /* Find offset of on_stack. */
+ asm volatile ("c.mv %0,sp" : "=r" (stack_ptr));
+ var_ptr = (riscv_reg_t) &on_stack[0];
+ offset = var_ptr - stack_ptr;
+ (void) offset;
+
+ asm volatile ("c.ldsp %0,0(sp)" : "=r" (a));
+ L_CHECK (a, 0x11112222ul);
+
+ asm volatile ("c.ldsp %0,8(sp)" : "=r" (a));
+ L_CHECK (a, 0x33334444ul);
+#endif
+}
+
+void
+test_c_flwsp ()
+{
+#if defined (SKIP_c_flwsp)
+ info ("--- Disable c.flwsp");
+#else
+ volatile float on_stack[] = { 1.23f, 3.14f, -5.6f, 10.9f };
+ float a;
+ uint32_t offset = 0;
+ riscv_reg_t stack_ptr, var_ptr;
+
+ info ("Testing c.flwsp");
+
+ /* Find offset of on_stack. */
+ asm volatile ("c.mv %0,sp" : "=r" (stack_ptr));
+ var_ptr = (riscv_reg_t) &on_stack[0];
+ offset = var_ptr - stack_ptr;
+ (void) offset;
+
+ if (offset == 0)
+ {
+ asm volatile ("c.flwsp %0,0(sp)" : "=f" (a));
+ F_CHECK (a, 1.23f);
+
+ asm volatile ("c.flwsp %0,4(sp)" : "=f" (a));
+ F_CHECK (a, 3.14f);
+
+ asm volatile ("c.flwsp %0,8(sp)" : "=f" (a));
+ F_CHECK (a, -5.6f);
+
+ asm volatile ("c.flwsp %0,12(sp)" : "=f" (a));
+ F_CHECK (a, 10.9f);
+ }
+ else if (offset == 8)
+ {
+ asm volatile ("c.flwsp %0,8(sp)" : "=f" (a));
+ F_CHECK (a, 1.23f);
+
+ asm volatile ("c.flwsp %0,12(sp)" : "=f" (a));
+ F_CHECK (a, 3.14f);
+
+ asm volatile ("c.flwsp %0,16(sp)" : "=f" (a));
+ F_CHECK (a, -5.6f);
+
+ asm volatile ("c.flwsp %0,20(sp)" : "=f" (a));
+ F_CHECK (a, 10.9f);
+ }
+#endif
+}
+
+void
+test_c_fldsp ()
+{
+#if defined (SKIP_c_fldsp)
+ info ("--- Disable c.fldsp");
+#else
+ volatile double on_stack[] = { 1.23, -5.89 };
+ double a = 0;
+ uint32_t offset = 0;
+ riscv_reg_t stack_ptr, var_ptr;
+
+ info ("Testing c.fldsp");
+
+ /* Find offset of on_stack. */
+ asm volatile ("c.mv %0,sp" : "=r" (stack_ptr));
+ var_ptr = (riscv_reg_t) &on_stack[0];
+ offset = var_ptr - stack_ptr;
+ (void) offset;
+
+ asm volatile ("c.fldsp %0,0(sp)" : "=f" (a));
+ D_CHECK (a, 1.23);
+
+ asm volatile ("c.fldsp %0,8(sp)" : "=f" (a));
+ D_CHECK (a, -5.89);
+#endif
+}
+
+void
+test_c_swsp ()
+{
+ volatile uint32_t on_stack[] = { 0x1111, 0x2222, 0x3333, 0x4444 };
+ uint32_t a, offset = 0;
+ riscv_reg_t stack_ptr, var_ptr;
+
+ info ("Testing c.swsp");
+
+ /* Find offset of on_stack. */
+ asm volatile ("c.mv %0,sp" : "=r" (stack_ptr));
+ var_ptr = (riscv_reg_t) &on_stack[0];
+ offset = var_ptr - stack_ptr;
+
+ if (offset == 0)
+ {
+ a = 0xbeef;
+ asm volatile ("c.swsp %0,0(sp)" : : "r" (a));
+
+ a = 0xdead;
+ asm volatile ("c.swsp %0,4(sp)" : : "r" (a));
+
+ a = 0xabcd;
+ asm volatile ("c.swsp %0,8(sp)" : : "r" (a));
+
+ a = 0x1298;
+ asm volatile ("c.swsp %0,12(sp)" : : "r" (a));
+ }
+ else if (offset == 8)
+ {
+ a = 0xbeef;
+ asm volatile ("c.swsp %0,8(sp)" : : "r" (a));
+
+ a = 0xdead;
+ asm volatile ("c.swsp %0,12(sp)" : : "r" (a));
+
+ a = 0xabcd;
+ asm volatile ("c.swsp %0,16(sp)" : : "r" (a));
+
+ a = 0x1298;
+ asm volatile ("c.swsp %0,20(sp)" : : "r" (a));
+ }
+ else
+ {
+ FAIL ("Invalid stack offset (expected 0 or 8)");
+ return;
+ }
+
+ I_CHECK (on_stack[0], 0xbeef);
+ I_CHECK (on_stack[1], 0xdead);
+ I_CHECK (on_stack[2], 0xabcd);
+ I_CHECK (on_stack[3], 0x1298);
+}
+
+void
+test_c_sdsp ()
+{
+#if defined (SKIP_c_sdsp)
+ info ("--- Disable c.sdsp");
+#else
+ volatile uint64_t on_stack[] = { 0x11112222, 0x33334444 };
+ uint64_t a = 0, offset = 0;
+ riscv_reg_t stack_ptr, var_ptr;
+
+ info ("Testing c.sdsp");
+
+ /* Find offset of on_stack. */
+ asm volatile ("c.mv %0,sp" : "=r" (stack_ptr));
+ var_ptr = (riscv_reg_t) &on_stack[0];
+ offset = var_ptr - stack_ptr;
+ (void) offset;
+
+ a = 0xdeadbeef;
+ asm volatile ("c.sdsp %0,0(sp)" : : "r" (a));
+
+ a = 0xabcd1234;
+ asm volatile ("c.sdsp %0,8(sp)" : : "r" (a));
+
+ L_CHECK (on_stack[0], 0xdeadbeef);
+ L_CHECK (on_stack[1], 0xabcd1234);
+#endif
+}
+
+void
+test_c_fswsp ()
+{
+#if defined (SKIP_c_fswsp)
+ info ("--- Disable c.fswsp");
+#else
+ volatile float on_stack[] = { 1.23f, 3.14f, -5.6f, 10.9f };
+ float a;
+ uint32_t offset = 0;
+ riscv_reg_t stack_ptr, var_ptr;
+
+ info ("Testing c.fswsp");
+
+ /* Find offset of on_stack. */
+ asm volatile ("c.mv %0,sp" : "=r" (stack_ptr));
+ var_ptr = (riscv_reg_t) &on_stack[0];
+ offset = var_ptr - stack_ptr;
+ (void) offset;
+
+ if (offset == 0)
+ {
+ a = -12.5f;
+ asm volatile ("c.fswsp %0,0(sp)" : : "f" (a));
+
+ a = 72.8f;
+ asm volatile ("c.fswsp %0,4(sp)" : : "f" (a));
+
+ a = 0.5f;
+ asm volatile ("c.fswsp %0,8(sp)" : : "f" (a));
+
+ a = 4.7f;
+ asm volatile ("c.fswsp %0,12(sp)" : : "f" (a));
+
+ }
+ else if (offset == 8)
+ {
+ a = -12.5f;
+ asm volatile ("c.fswsp %0,8(sp)" : : "f" (a));
+
+ a = 72.8f;
+ asm volatile ("c.fswsp %0,12(sp)" : : "f" (a));
+
+ a = 0.5f;
+ asm volatile ("c.fswsp %0,16(sp)" : : "f" (a));
+
+ a = 4.7f;
+ asm volatile ("c.fswsp %0,20(sp)" : : "f" (a));
+ }
+ else
+ {
+ FAIL ("Invalid stack offset (expected 0 or 8)");
+ return;
+ }
+
+ F_CHECK (on_stack[0], -12.5f);
+ F_CHECK (on_stack[1], 72.8f);
+ F_CHECK (on_stack[2], 0.5f);
+ F_CHECK (on_stack[3], 4.7f);
+
+#endif
+}
+
+void
+test_c_fsdsp ()
+{
+#if defined (SKIP_c_fsdsp)
+ info ("--- Disable c.fsdsp");
+#else
+ volatile double on_stack[] = { -1.23, 5.89 };
+ double a = 0;
+ uint32_t offset = 0;
+ riscv_reg_t stack_ptr, var_ptr;
+
+ info ("Testing c.fsdsp");
+
+ /* Find offset of on_stack. */
+ asm volatile ("c.mv %0,sp" : "=r" (stack_ptr));
+ var_ptr = (riscv_reg_t) &on_stack[0];
+ offset = var_ptr - stack_ptr;
+ (void) offset;
+
+ a = 1234.55;
+ asm volatile ("c.fsdsp %0,0(sp)" : : "f" (a));
+
+ a = -7890.15;
+ asm volatile ("c.fsdsp %0,8(sp)" : : "f" (a));
+
+ D_CHECK (on_stack[0], 1234.55);
+ D_CHECK (on_stack[1], -7890.15);
+#endif
+}
+
+void
+test_c_lw ()
+{
+ static uint32_t g_data[] = { 0x1111, 0x2222, 0x3333, 0x4444 };
+ uint32_t a = 0;
+ riscv_reg_t var_ptr;
+
+ info ("Testing c.lw");
+
+ var_ptr = (riscv_reg_t) &g_data[0];
+
+ asm volatile ("c.lw %0,0(%1)" : "=r" (a) : "r" (var_ptr));
+ I_CHECK (a, 0x1111);
+
+ asm volatile ("c.lw %0,4(%1)" : "=r" (a) : "r" (var_ptr));
+ I_CHECK (a, 0x2222);
+
+ asm volatile ("c.lw %0,8(%1)" : "=r" (a) : "r" (var_ptr));
+ I_CHECK (a, 0x3333);
+
+ asm volatile ("c.lw %0,12(%1)" : "=r" (a) : "r" (var_ptr));
+ I_CHECK (a, 0x4444);
+}
+
+void
+test_c_ld ()
+{
+#if defined (SKIP_c_ld)
+ info ("--- Disable c.ld");
+#else
+ static uint64_t g_data[] = { 0x11112222, 0x33334444 };
+ uint64_t a = 0;
+ riscv_reg_t var_ptr;
+
+ info ("Testing c.ld");
+
+ var_ptr = (riscv_reg_t) &g_data[0];
+
+ asm volatile ("c.ld %0,0(%1)" : "=r" (a) : "r" (var_ptr));
+ L_CHECK (a, 0x11112222);
+
+ asm volatile ("c.ld %0,8(%1)" : "=r" (a) : "r" (var_ptr));
+ L_CHECK (a, 0x33334444);
+#endif
+}
+
+void
+test_c_flw ()
+{
+#if defined (SKIP_c_flw)
+ info ("--- Disable c.flw");
+#else
+ static float g_data[] = { 1.23f, 3.14f, -5.6f, 10.9f };
+ float a = 0;
+ riscv_reg_t var_ptr;
+
+ info ("Testing c.flw");
+
+ var_ptr = (riscv_reg_t) &g_data[0];
+
+ asm volatile ("c.flw %0,0(%1)" : "=f" (a) : "r" (var_ptr));
+ F_CHECK (a, 1.23f);
+
+ asm volatile ("c.flw %0,4(%1)" : "=f" (a) : "r" (var_ptr));
+ F_CHECK (a, 3.14f);
+
+ asm volatile ("c.flw %0,8(%1)" : "=f" (a) : "r" (var_ptr));
+ F_CHECK (a, -5.6f);
+
+ asm volatile ("c.flw %0,12(%1)" : "=f" (a) : "r" (var_ptr));
+ F_CHECK (a, 10.9f);
+#endif
+}
+
+void
+test_c_fld ()
+{
+#if defined (SKIP_c_fld)
+ info ("--- Disable c.fld");
+#else
+ static double g_data[] = { 1234.5, -7890.4 };
+ double a = 0;
+ riscv_reg_t var_ptr;
+
+ info ("Testing c.fld");
+
+ var_ptr = (riscv_reg_t) &g_data[0];
+
+ asm volatile ("c.fld %0,0(%1)" : "=f" (a) : "r" (var_ptr));
+ D_CHECK (a, 1234.5);
+
+ asm volatile ("c.fld %0,8(%1)" : "=f" (a) : "r" (var_ptr));
+ D_CHECK (a, -7890.4);
+#endif
+}
+
+void
+test_c_sw ()
+{
+ volatile uint32_t g_data[] = { 0x1111, 0x2222, 0x3333, 0x4444 };
+ uint32_t a;
+ riscv_reg_t var_ptr;
+
+ info ("Testing c.sw");
+
+ var_ptr = (riscv_reg_t) &g_data[0];
+
+ a = 0xbeef;
+ asm volatile ("c.sw %0,0(%1)" : : "r" (a), "r" (var_ptr));
+
+ a = 0xdead;
+ asm volatile ("c.sw %0,4(%1)" : : "r" (a), "r" (var_ptr));
+
+ a = 0xabcd;
+ asm volatile ("c.sw %0,8(%1)" : : "r" (a), "r" (var_ptr));
+
+ a = 0x1298;
+ asm volatile ("c.sw %0,12(%1)" : : "r" (a), "r" (var_ptr));
+
+ I_CHECK (g_data[0], 0xbeef);
+ I_CHECK (g_data[1], 0xdead);
+ I_CHECK (g_data[2], 0xabcd);
+ I_CHECK (g_data[3], 0x1298);
+}
+
+void
+test_c_sd ()
+{
+#if defined (SKIP_c_sd)
+ info ("--- Disable c.sd");
+#else
+ volatile uint64_t g_data[] = { 0x1111, 0x2222, 0x3333, 0x4444 };
+ uint64_t a;
+ riscv_reg_t var_ptr;
+
+ info ("Testing c.sd");
+
+ var_ptr = (riscv_reg_t) &g_data[0];
+
+ a = 0xbeefdead;
+ asm volatile ("c.sd %0,0(%1)" : : "r" (a), "r" (var_ptr));
+
+ a = 0xabcd1298;
+ asm volatile ("c.sd %0,8(%1)" : : "r" (a), "r" (var_ptr));
+
+ L_CHECK (g_data[0], 0xbeefdead);
+ L_CHECK (g_data[1], 0xabcd1298);
+#endif
+}
+
+void
+test_c_fsw ()
+{
+#if defined (SKIP_c_fsw)
+ info ("--- Disable c.fsw");
+#else
+ volatile float g_data[] = { 1.0f, 2.0f, 3.0f, -4.0f };
+ float a;
+ riscv_reg_t var_ptr;
+
+ info ("Testing c.fsw");
+
+ var_ptr = (riscv_reg_t) &g_data[0];
+
+ a = 12.5f;
+ asm volatile ("c.fsw %0,0(%1)" : : "f" (a), "r" (var_ptr));
+
+ a = -7.9f;
+ asm volatile ("c.fsw %0,4(%1)" : : "f" (a), "r" (var_ptr));
+
+ a = 123.4f;
+ asm volatile ("c.fsw %0,8(%1)" : : "f" (a), "r" (var_ptr));
+
+ a = 0.5f;
+ asm volatile ("c.fsw %0,12(%1)" : : "f" (a), "r" (var_ptr));
+
+ F_CHECK (g_data[0], 12.5f);
+ F_CHECK (g_data[1], -7.9f);
+ F_CHECK (g_data[2], 123.4f);
+ F_CHECK (g_data[3], 0.5f);
+#endif
+}
+
+void
+test_c_fsd ()
+{
+#if defined (SKIP_c_fsd)
+ info ("--- Disable c.fsd");
+#else
+ volatile double g_data[] = { 1.0, 2.0 };
+ double a;
+ riscv_reg_t var_ptr;
+
+ info ("Testing c.fsd");
+
+ var_ptr = (riscv_reg_t) &g_data[0];
+
+ a = 1234.5;
+ asm volatile ("c.fsd %0,0(%1)" : : "f" (a), "r" (var_ptr));
+
+ a = -7892.9;
+ asm volatile ("c.fsd %0,8(%1)" : : "f" (a), "r" (var_ptr));
+
+ D_CHECK (g_data[0], 1234.5);
+ D_CHECK (g_data[1], -7892.9);
+
+#endif
+}
+
+void
+test_c_j ()
+{
+ volatile int a = 0, b = 5;
+
+ info ("Testing c.j");
+
+label_b:
+ /* If we have jumped back. */
+ if (a == 7 && b == 5)
+ return;
+
+ asm goto ("c.j %l[label_f]" : : : : label_f);
+ asm volatile ("c.mv %0, %1" : "=r" (a) : "r" (b));
+
+ FAIL ("Jumped at wrong location");
+ return;
+
+label_f:
+
+ I_CHECK (a, 0);
+ I_CHECK (b, 5);
+
+ a = 7;
+ asm goto ("c.j %l[label_b]" : : : : label_b);
+ asm volatile ("c.mv %0, %1" : "=r" (a) : "r" (b));
+}
+
+void
+test_c_jal ()
+{
+#if defined (SKIP_c_jal)
+ info ("--- Disable c.jal");
+#else
+ volatile int a = 0, b = 5;
+ riscv_reg_t ra;
+
+ info ("Testing c.jal");
+
+label_b:
+ /* If we have jumped back. */
+ if (a == 7 && b == 5)
+ {
+ asm volatile ("c.mv %0, ra" : "=r" (ra));
+ L_CHECK (ra, (riscv_reg_t) &&ret_label_2);
+ return;
+ }
+
+ asm goto ("c.jal %l[label_f]" : : : : label_f);
+ret_label_1:
+ asm volatile ("c.mv %0, %1" : "=r" (a) : "r" (b));
+
+ FAIL ("Jumped at wrong location");
+ return;
+
+label_f:
+ asm volatile ("c.mv %0, ra" : "=r" (ra));
+
+ L_CHECK (ra, (riscv_reg_t) &&ret_label_1);
+ I_CHECK (a, 0);
+ I_CHECK (b, 5);
+
+ /* Jump back. */
+ a = 7;
+ asm goto ("c.jal %l[label_b]" : : : : label_b);
+ret_label_2:
+ asm volatile ("c.nop");
+#endif
+}
+
+void
+test_c_jr ()
+{
+ volatile int a = 0, b = 5;
+ riscv_reg_t addr;
+
+ info ("Testing c.jr");
+
+ addr = (riscv_reg_t) &&label_f;
+
+label_b:
+ /* If we have jumped back. */
+ if (a == 7 && b == 5)
+ return;
+
+ asm volatile ("c.jr %0" : : "r" (addr));
+ asm volatile ("c.mv %0, %1" : "=r" (a) : "r" (b));
+
+ FAIL ("Jumped at wrong location");
+
+label_f:
+ I_CHECK(a, 0);
+ I_CHECK(b, 5);
+
+ a = 7;
+ addr = (riscv_reg_t) &&label_b;
+ asm volatile ("c.jr %0" : : "r" (addr));
+
+ FAIL ("Jumped at wrong location");
+}
+
+void
+test_c_jalr ()
+{
+ volatile int a = 0, b = 5;
+ riscv_reg_t addr, ra;
+
+ info ("Testing c.jalr");
+
+ addr = (riscv_reg_t) &&label_f;
+
+label_b:
+ /* If we have jumped back. */
+ if (a == 7)
+ {
+ asm volatile ("c.mv %0, ra" : "=r" (ra));
+ L_CHECK (ra, (riscv_reg_t) &&label_ra_2);
+ return;
+ }
+
+ asm volatile ("c.jalr %0" : : "r" (addr));
+label_ra_1:
+ asm volatile ("c.nop");
+ asm volatile ("c.mv %0, %1" : "=r" (a) : "r" (b));
+
+ FAIL ("Jumped at wrong location");
+
+label_f:
+ asm volatile ("c.mv %0, ra" : "=r" (ra));
+
+ L_CHECK (ra, (riscv_reg_t) &&label_ra_1);
+ I_CHECK (a, 0);
+ I_CHECK (b, 5);
+
+ a = 7;
+ addr = (riscv_reg_t) &&label_b;
+ asm volatile ("c.jalr %0" : : "r" (addr));
+label_ra_2:
+ asm volatile ("c.nop");
+
+ FAIL ("Jumped at wrong location");
+}
+
+void
+test_c_beqz ()
+{
+ volatile int zero = 0, non_zero = 1;
+ int a = 0, b = 5;
+
+ info ("Testing c.beqz");
+
+label_b_1:
+ /* If we have branched back. */
+ if (a == 17)
+ {
+ I_CHECK (b, 8);
+ if (b != 8)
+ FAIL ("Backward-taken branch failed");
+ return;
+ }
+
+ asm goto ("c.beqz %0, %l[label_f_1]"
+ : : "r" (zero) : : label_f_1); /* Forward taken. */
+ asm volatile ("c.li %0, 7" : "=r" (a));
+ asm volatile ("c.li %0, 8" : "=r" (b));
+
+label_f_1:
+ I_CHECK (a, 0);
+ I_CHECK (b, 5);
+ if (a != 0 || b != 5)
+ {
+ FAIL ("Forward-taken branch failed");
+ return;
+ }
+
+ asm goto ("c.beqz %0, %l[label_f_2]"
+ :
+ : "r" (non_zero)
+ :
+ : label_f_2); /* Not taken. */
+
+ a = 7;
+ b = 8;
+
+label_f_2:
+ I_CHECK (a, 7);
+ I_CHECK (b, 8);
+ if (a != 7 || b != 8)
+ {
+ FAIL ("Not-taken branch failed");
+ return;
+ }
+
+ /* Branch back. */
+ a = 17;
+ asm goto ("c.beqz %0, %l[label_b_1]"
+ :
+ : "r" (zero)
+ :
+ : label_b_1); /* Backward taken. */
+
+ FAIL ("Backward-taken branch failed");
+}
+
+void
+test_c_bnez ()
+{
+ volatile int zero = 0, non_zero = 1;
+ int a = 0, b = 5;
+
+ info ("Testing c.bnez");
+
+label_b_1:
+ /* If we have branched back. */
+ if (a == 17)
+ {
+ if (b != 8)
+ FAIL ("Backward-taken branch failed");
+ return;
+ }
+
+ asm goto ("c.bnez %0, %l[label_f_1]"
+ :
+ : "r" (non_zero)
+ :
+ : label_f_1); /* Forward taken. */
+ asm volatile ("c.li %0, 7" : "=r" (a));
+ asm volatile ("c.li %0, 8" : "=r" (b));
+
+label_f_1:
+ if (a != 0 || b != 5)
+ {
+ FAIL ("Forward-taken branch failed");
+ /* FAIL already incremented num_fail. */
+ return;
+ }
+
+ asm goto ("c.bnez %0, %l[label_f_2]"
+ :
+ : "r" (zero)
+ :
+ : label_f_2); /* Not taken. */
+ a = 7;
+ b = 8;
+
+label_f_2:
+ if (a != 7 || b != 8)
+ {
+ FAIL ("Not-taken branch failed");
+ /* FAIL already incremented num_fail. */
+ return;
+ }
+
+ /* Branch back. */
+ a = 17;
+ asm goto ("c.bnez %0, %l[label_b_1]"
+ :
+ : "r" (non_zero)
+ :
+ : label_b_1); /* Backward taken. */
+
+ FAIL ("backward-taken branch failed");
+ /* FAIL already incremented num_fail. */
+}
+
+void
+test_c_li ()
+{
+ riscv_reg_t a, b, c;
+
+ info ("Testing c.li");
+
+ asm volatile ("c.li %0,0" : "=r" (a));
+ asm volatile ("c.li %0,-1" : "=r" (b));
+ asm volatile ("c.li %0,31" : "=r" (c));
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (a, 0);
+ L_CHECK (b, -1);
+ L_CHECK (c, 31);
+#else
+ I_CHECK (a, 0);
+ I_CHECK (b, -1);
+ I_CHECK (c, 31);
+#endif
+}
+
+void
+test_c_lui ()
+{
+ int a = 0;
+
+ info ("Testing c.lui");
+
+ asm volatile ("c.lui %0,1" : "=r" (a));
+ I_CHECK (a, 0x1000);
+
+ asm volatile ("c.lui %0,31" : "=r" (a));
+ I_CHECK (a, 0x1F000);
+}
+
+void
+test_c_addi ()
+{
+ int a = 0;
+
+ info ("Testing c.addi");
+
+ asm volatile ("c.addi %0,1" : "+r" (a));
+ I_CHECK (a, 1);
+
+ asm volatile ("c.addi %0,-1" : "+r" (a));
+ I_CHECK (a, 0);
+
+ asm volatile ("c.addi %0,31" : "+r" (a));
+ I_CHECK (a, 31);
+}
+
+void
+test_c_addiw ()
+{
+#if defined (SKIP_c_addiw)
+ info ("--- Disable c.addiw");
+#else
+ int a = 1;
+
+ info ("Testing c.addiw");
+
+ asm volatile ("c.addiw %0,0" : "+r" (a));
+ I_CHECK (a, 1);
+
+ asm volatile ("c.addiw %0,-1" : "+r" (a));
+ I_CHECK (a, 0);
+
+ asm volatile ("c.addiw %0,31" : "+r" (a));
+ I_CHECK (a, 31);
+#endif
+}
+
+void
+test_c_addi16sp ()
+{
+ volatile riscv_reg_t orig_sp, sp_1, sp_2;
+
+ info ("Testing c.addi16sp");
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated"
+ asm volatile ("\
+ c.mv %0, sp \n\
+ c.addi16sp sp,32 \n\
+ c.mv %1, sp \n\
+ c.addi16sp sp,-32 \n\
+ c.mv %2, sp \n\
+ c.mv sp, %0" /* Restore sp. */
+ : "=r" (orig_sp), "=r" (sp_1), "=r" (sp_2)
+ : "r" (orig_sp)
+ : "sp");
+#pragma GCC diagnostic pop
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (orig_sp, sp_2);
+ L_CHECK ((sp_1 - sp_2), 32);
+#else
+ I_CHECK (orig_sp, sp_2);
+ I_CHECK ((sp_1 - sp_2), 32);
+#endif
+}
+
+void
+test_c_addi4spn ()
+{
+ volatile riscv_reg_t orig_sp, sp_1, sp_2;
+
+ info ("Testing c.addi4spn");
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated"
+ asm volatile ("\
+ c.mv %0, sp \n\
+ c.addi4spn %1,sp,16 \n\
+ c.addi4spn %2,sp,32"
+ : "=r" (orig_sp), "=r" (sp_1), "=r" (sp_2)
+ : "r" (orig_sp)
+ : "sp");
+#pragma GCC diagnostic pop
+
+#if (__riscv_xlen >= 64)
+ L_CHECK ((orig_sp + 16), sp_1);
+ L_CHECK ((orig_sp + 32), sp_2);
+#else
+ I_CHECK ((orig_sp + 16), sp_1);
+ I_CHECK ((orig_sp + 32), sp_2);
+#endif
+}
+
+void
+test_c_slli ()
+{
+ volatile riscv_reg_t val = -5, r1, r2, r3;
+
+ info ("Testing c.slli");
+
+ asm volatile ("c.slli %0,1" : "+r" (val));
+ asm volatile ("c.mv %0,%1" : "=r" (r1) : "r" (val));
+ asm volatile ("c.slli %0,2" : "+r" (val));
+ asm volatile ("c.mv %0,%1" : "=r" (r2) : "r" (val));
+ asm volatile ("c.slli %0,3" : "+r" (val));
+ asm volatile ("c.mv %0,%1" : "=r" (r3) : "r" (val));
+ asm volatile ("c.slli %0,4" : "+r" (val));
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (r1, -10);
+ L_CHECK (r2, -40);
+ L_CHECK (r3, -320);
+ L_CHECK (val, -5120);
+#else
+ I_CHECK (r1, -10);
+ I_CHECK (r2, -40);
+ I_CHECK (r3, -320);
+ I_CHECK (val, -5120);
+#endif
+
+ val = 5;
+ asm volatile ("c.slli %0,1" : "+r" (val));
+ asm volatile ("c.mv %0,%1" : "=r" (r1) : "r" (val));
+ asm volatile ("c.slli %0,2" : "+r" (val));
+ asm volatile ("c.mv %0,%1" : "=r" (r2) : "r" (val));
+ asm volatile ("c.slli %0,3" : "+r" (val));
+ asm volatile ("c.mv %0,%1" : "=r" (r3) : "r" (val));
+ asm volatile ("c.slli %0,4" : "+r" (val));
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (r1, 10);
+ L_CHECK (r2, 40);
+ L_CHECK (r3, 320);
+ L_CHECK (val, 5120);
+#else
+ I_CHECK (r1, 10);
+ I_CHECK (r2, 40);
+ I_CHECK (r3, 320);
+ I_CHECK (val, 5120);
+#endif
+}
+
+void
+test_c_srli ()
+{
+ volatile riscv_reg_t val = -105, r1, r2, r3;
+
+ info ("Testing c.srli");
+
+ asm volatile ("c.srli %0,1" : "+r" (val));
+ asm volatile ("c.mv %0,%1" : "=r" (r1) : "r" (val));
+ asm volatile ("c.srli %0,2" : "+r" (val));
+ asm volatile ("c.mv %0,%1" : "=r" (r2) : "r" (val));
+ asm volatile ("c.srli %0,3" : "+r" (val));
+ asm volatile ("c.mv %0,%1" : "=r" (r3) : "r" (val));
+ asm volatile ("c.srli %0,4" : "+r" (val));
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (r1, 0x7fffffffffffffcbul);
+ L_CHECK (r2, 0x1ffffffffffffff2ul);
+ L_CHECK (r3, 0x3fffffffffffffeul);
+ L_CHECK (val, 0x3ffffffffffffful);
+#else
+ I_CHECK (r1, 0x7fffffcb);
+ I_CHECK (r2, 0x1ffffff2);
+ I_CHECK (r3, 0x3fffffe);
+ I_CHECK (val, 0x3fffff);
+#endif
+
+ val = 105;
+ asm volatile ("c.srli %0,1" : "+r" (val));
+ asm volatile ("c.mv %0,%1" : "=r" (r1) : "r" (val));
+ asm volatile ("c.srli %0,2" : "+r" (val));
+ asm volatile ("c.mv %0,%1" : "=r" (r2) : "r" (val));
+ asm volatile ("c.srli %0,3" : "+r" (val));
+ asm volatile ("c.mv %0,%1" : "=r" (r3) : "r" (val));
+ asm volatile ("c.srli %0,4" : "+r" (val));
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (r1, 52);
+ L_CHECK (r2, 13);
+ L_CHECK (r3, 1);
+ L_CHECK (val, 0);
+#else
+ I_CHECK (r1, 52);
+ I_CHECK (r2, 13);
+ I_CHECK (r3, 1);
+ I_CHECK (val, 0);
+#endif
+}
+
+void
+test_c_srai ()
+{
+ volatile riscv_reg_t val = -105, r1, r2, r3;
+
+ info ("Testing c.srai");
+
+ asm volatile ("c.srai %0,1" : "+r" (val));
+ asm volatile ("c.mv %0,%1" : "=r" (r1) : "r" (val));
+ asm volatile ("c.srai %0,2" : "+r" (val));
+ asm volatile ("c.mv %0,%1" : "=r" (r2) : "r" (val));
+ asm volatile ("c.srai %0,3" : "+r" (val));
+ asm volatile ("c.mv %0,%1" : "=r" (r3) : "r" (val));
+ asm volatile ("c.srai %0,4" : "+r" (val));
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (r1, -53);
+ L_CHECK (r2, -14);
+ L_CHECK (r3, -2);
+ L_CHECK (val, -1);
+#else
+ I_CHECK (r1, -53);
+ I_CHECK (r2, -14);
+ I_CHECK (r3, -2);
+ I_CHECK (val, -1);
+#endif
+
+ val = 105;
+ asm volatile ("c.srai %0,1" : "+r" (val));
+ asm volatile ("c.mv %0,%1" : "=r" (r1) : "r" (val));
+ asm volatile ("c.srai %0,2" : "+r" (val));
+ asm volatile ("c.mv %0,%1" : "=r" (r2) : "r" (val));
+ asm volatile ("c.srai %0,3" : "+r" (val));
+ asm volatile ("c.mv %0,%1" : "=r" (r3) : "r" (val));
+ asm volatile ("c.srai %0,4" : "+r" (val));
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (r1, 52);
+ L_CHECK (r2, 13);
+ L_CHECK (r3, 1);
+ L_CHECK (val, 0);
+#else
+ I_CHECK (r1, 52);
+ I_CHECK (r2, 13);
+ I_CHECK (r3, 1);
+ I_CHECK (val, 0);
+#endif
+}
+
+void
+test_c_andi ()
+{
+ riscv_reg_t val1 = -1, val2 = 0x101;
+
+ info ("Testing c.andi");
+
+ asm volatile ("c.andi %0,5" : "+r" (val1));
+ asm volatile ("c.andi %0,7" : "+r" (val2));
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (val1, 5);
+ L_CHECK (val2, 1);
+#else
+ I_CHECK (val1, 5);
+ I_CHECK (val2, 1);
+#endif
+}
+
+void
+test_c_add ()
+{
+ riscv_reg_t dst, rs2;
+
+ info ("Testing c.add");
+
+ dst = -1;
+ rs2 = 1;
+ asm volatile ("c.add %0,%1" : "+r" (dst) : "r" (rs2));
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (dst, 0);
+ L_CHECK (rs2, 1);
+#else
+ I_CHECK (dst, 0);
+ I_CHECK (rs2, 1);
+#endif
+
+ dst = -1;
+ rs2 = 0;
+ asm volatile ("c.add %0,%1" : "+r" (dst) : "r" (rs2));
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (dst, -1);
+ L_CHECK (rs2, 0);
+#else
+ I_CHECK (dst, -1);
+ I_CHECK (rs2, 0);
+#endif
+}
+
+void
+test_c_and ()
+{
+ riscv_reg_t dst, rs2;
+
+ info ("Testing c.and");
+
+ dst = -1;
+ rs2 = 1;
+ asm volatile ("c.and %0,%1" : "+r" (dst) : "r" (rs2));
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (dst, 1);
+ L_CHECK (rs2, 1);
+#else
+ I_CHECK (dst, 1);
+ I_CHECK (rs2, 1);
+#endif
+
+ dst = -1;
+ rs2 = 0;
+ asm volatile ("c.and %0,%1" : "+r" (dst) : "r" (rs2));
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (dst, 0);
+ L_CHECK (rs2, 0);
+#else
+ I_CHECK (dst, 0);
+ I_CHECK (rs2, 0);
+#endif
+}
+
+void
+test_c_or ()
+{
+ riscv_reg_t dst, rs2;
+
+ info ("Testing c.or");
+
+ dst = -3;
+ rs2 = 2;
+ asm volatile ("c.or %0,%1" : "+r" (dst) : "r" (rs2));
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (dst, -1);
+ L_CHECK (rs2, 2);
+#else
+ I_CHECK (dst, -1);
+ I_CHECK (rs2, 2);
+#endif
+
+ dst = 0x7ffffffd;
+ rs2 = 0x80000002;
+ asm volatile ("c.or %0,%1" : "+r" (dst) : "r" (rs2));
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (dst, 0xffffffff);
+ L_CHECK (rs2, 0x80000002);
+#else
+ I_CHECK (dst, 0xffffffff);
+ I_CHECK (rs2, 0x80000002);
+#endif
+}
+
+void
+test_c_xor ()
+{
+ riscv_reg_t dst, rs2;
+
+ info ("Testing c.xor");
+
+ dst = -3;
+ rs2 = -3;
+ asm volatile ("c.xor %0,%1" : "+r" (dst) : "r" (rs2));
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (dst, 0);
+ L_CHECK (rs2, -3);
+#else
+ I_CHECK (dst, 0);
+ I_CHECK (rs2, -3);
+#endif
+
+ dst = 0x7ffffffd;
+ rs2 = 0x80000002;
+ asm volatile ("c.xor %0,%1" : "+r" (dst) : "r" (rs2));
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (dst, 0xffffffff);
+ L_CHECK (rs2, 0x80000002);
+#else
+ I_CHECK (dst, 0xffffffff);
+ I_CHECK (rs2, 0x80000002);
+#endif
+}
+
+void
+test_c_sub ()
+{
+ riscv_reg_t dst, rs2;
+
+ info ("Testing c.sub");
+
+ dst = -1;
+ rs2 = 1;
+ asm volatile ("c.sub %0,%1" : "+r" (dst) : "r" (rs2));
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (dst, -2);
+ L_CHECK (rs2, 1);
+#else
+ I_CHECK (dst, -2);
+ I_CHECK (rs2, 1);
+#endif
+
+ dst = 0;
+ rs2 = -1;
+ asm volatile ("c.sub %0,%1" : "+r" (dst) : "r" (rs2));
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (dst, 1);
+ L_CHECK (rs2, -1);
+#else
+ I_CHECK (dst, 1);
+ I_CHECK (rs2, -1);
+#endif
+}
+
+void
+test_c_addw ()
+{
+#if defined (SKIP_c_addw)
+ info ("--- Disable c.addw");
+#else
+
+ riscv_reg_t dst, rs2;
+
+ info ("Testing c.addw");
+
+ dst = -1;
+ rs2 = 1;
+ asm volatile ("c.addw %0,%1" : "+r" (dst) : "r" (rs2));
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (dst, 0);
+ L_CHECK (rs2, 1);
+#else
+ I_CHECK (dst, 0);
+ I_CHECK (rs2, 1);
+#endif
+
+ dst = -1;
+ rs2 = 0;
+ asm volatile ("c.addw %0,%1" : "+r" (dst) : "r" (rs2));
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (dst, -1);
+ L_CHECK (rs2, 0);
+#else
+ I_CHECK (dst, -1);
+ I_CHECK (rs2, 0);
+#endif
+#endif
+}
+
+void
+test_c_subw ()
+{
+#if defined (SKIP_c_subw)
+ info ("--- Disable c.subw");
+#else
+ riscv_reg_t dst, rs2;
+
+ info ("Testing c.subw");
+
+ dst = -1;
+ rs2 = 1;
+ asm volatile ("c.subw %0,%1" : "+r" (dst) : "r" (rs2));
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (dst, -2);
+ L_CHECK (rs2, 1);
+#else
+ I_CHECK (dst, -2);
+ I_CHECK (rs2, 1);
+#endif
+
+ dst = 0;
+ rs2 = -1;
+ asm volatile ("c.subw %0,%1" : "+r" (dst) : "r" (rs2));
+
+#if (__riscv_xlen >= 64)
+ L_CHECK (dst, 1);
+ L_CHECK (rs2, -1);
+#else
+ I_CHECK (dst, 1);
+ I_CHECK (rs2, -1);
+#endif
+#endif
+}
+
+int
+main ()
+{
+ test_c_lwsp ();
+ test_c_ldsp ();
+ test_c_flwsp ();
+ test_c_fldsp ();
+ test_c_swsp ();
+ test_c_sdsp ();
+ test_c_fswsp ();
+ test_c_fsdsp ();
+ test_c_lw ();
+ test_c_ld ();
+ test_c_flw ();
+ test_c_fld ();
+ test_c_sw ();
+ test_c_sd ();
+ test_c_fsw ();
+ test_c_fsd ();
+ test_c_j ();
+ test_c_jal ();
+ test_c_jr ();
+ test_c_jalr ();
+ test_c_beqz ();
+ test_c_bnez ();
+ test_c_li ();
+ test_c_lui ();
+ test_c_addi ();
+ test_c_addiw ();
+ test_c_addi16sp ();
+ test_c_addi4spn ();
+ test_c_slli ();
+ test_c_srli ();
+ test_c_srai ();
+ test_c_andi ();
+ test_c_add ();
+ test_c_and ();
+ test_c_or ();
+ test_c_xor ();
+ test_c_sub ();
+ test_c_addw ();
+ test_c_subw ();
+
+ if (num_fail == 0)
+ {
+ print ("*** All %d tests pass\n", total_tests);
+ }
+ else
+ {
+ print ("*** Total %d tests out of %d fail\n", num_fail, total_tests);
+ }
+
+ return num_fail;
+}
new file mode 100644
@@ -0,0 +1,31 @@
+# Copyright 2023 Free Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# Tests to check instruction simulation of RISC-V instructions.
+
+require {istarget "riscv*-*-*"}
+
+standard_testfile
+
+if { [prepare_for_testing "failed to prepare" ${testfile} ${srcfile} \
+ {debug quiet}] } {
+ return -1
+}
+
+if { ![runto_main] } {
+ return -1
+}
+
+gdb_continue_to_end ".*All.*tests pass.*Inferior.*process.*exited normally.*"
@@ -107,4 +107,8 @@ static int gdb_open_modeflags[12] = {
GDB_O_RDWR | GDB_O_CREAT | GDB_O_APPEND | GDB_O_BINARY
};
+#define C_REG(X) ((X) + 8) /* Register in a compressed instruction. */
+#define REG_RA 1 /* Return address register. */
+#define REG_SP 2 /* Stack pointer register. */
+
#endif
@@ -1193,6 +1193,317 @@ execute_a (SIM_CPU *cpu, unsigned_word iw, const struct riscv_opcode *op)
return pc;
}
+/* Simulate one RVC (compressed) instruction.  IW is the 16-bit
+   instruction word, OP the opcode table entry it matched.  Return the
+   address of the next instruction to execute.  */
+
+static sim_cia
+execute_c (SIM_CPU *cpu, unsigned_word iw, const struct riscv_opcode *op)
+{
+  SIM_DESC sd = CPU_STATE (cpu);
+  struct riscv_sim_cpu *riscv_cpu = RISCV_SIM_CPU (cpu);
+  /* Register fields.  RD and RS2 index the full register file; the
+     3-bit RS1'/RS2' fields of the compressed formats map onto x8-x15,
+     hence C_REG.  */
+  int rd = (iw >> OP_SH_RD) & OP_MASK_RD;
+  int rs1_c = C_REG ((iw >> OP_SH_CRS1S) & OP_MASK_CRS1S);
+  int rs2 = (iw >> OP_SH_CRS2) & OP_MASK_CRS2;
+  int rs2_c = C_REG ((iw >> OP_SH_CRS2S) & OP_MASK_CRS2S);
+  const char *rd_name = riscv_gpr_names_abi[rd];
+  const char *rs1_c_name = riscv_gpr_names_abi[rs1_c];
+  const char *rs2_name = riscv_gpr_names_abi[rs2];
+  const char *rs2_c_name = riscv_gpr_names_abi[rs2_c];
+  signed_word imm;
+  unsigned_word tmp;
+  /* Compressed instructions are 2 bytes wide.  */
+  sim_cia pc = riscv_cpu->pc + 2;
+
+  switch (op->match)
+    {
+    case MATCH_C_JR | MATCH_C_MV:
+      /* C.JR and C.MV share a match value; disambiguate by mask.  */
+      switch (op->mask)
+	{
+	case MASK_C_MV:
+	  TRACE_INSN (cpu, "c.mv %s, %s; // %s = %s",
+		      rd_name, rs2_name, rd_name, rs2_name);
+	  store_rd (cpu, rd, riscv_cpu->regs[rs2]);
+	  break;
+	case MASK_C_JR:
+	  TRACE_INSN (cpu, "c.jr %s;",
+		      rd_name);
+	  pc = riscv_cpu->regs[rd];
+	  TRACE_BRANCH (cpu, "to %#" PRIxTW, pc);
+	  break;
+	}
+      break;
+    case MATCH_C_J:
+      imm = EXTRACT_CJTYPE_IMM (iw);
+      TRACE_INSN (cpu, "c.j %" PRIxTW,
+		  imm);
+      pc = riscv_cpu->pc + imm;
+      TRACE_BRANCH (cpu, "to %#" PRIxTW, pc);
+      break;
+    case MATCH_C_JAL | MATCH_C_ADDIW:
+      /* JAL and ADDIW have the same mask, so switch based on op name:
+	 "c.jal"[2] is 'j', "c.addiw"[2] is 'a'.  */
+      switch (op->name[2])
+	{
+	case 'j':
+	  imm = EXTRACT_CJTYPE_IMM (iw);
+	  TRACE_INSN (cpu, "c.jal %" PRIxTW,
+		      imm);
+	  /* Link register is the return address of this 2-byte insn.  */
+	  store_rd (cpu, REG_RA, riscv_cpu->pc + 2);
+	  pc = riscv_cpu->pc + imm;
+	  TRACE_BRANCH (cpu, "to %#" PRIxTW, pc);
+	  break;
+	case 'a':
+	  imm = EXTRACT_CITYPE_IMM (iw);
+	  TRACE_INSN (cpu, "c.addiw %s, %s, %#" PRIxTW "; // %s += %#" PRIxTW,
+		      rd_name, rd_name, imm, rd_name, imm);
+	  RISCV_ASSERT_RV64 (cpu, "insn: %s", op->name);
+	  store_rd (cpu, rd, EXTEND32 (riscv_cpu->regs[rd] + imm));
+	  break;
+	default:
+	  TRACE_INSN (cpu, "UNHANDLED INSN: %s", op->name);
+	  sim_engine_halt (sd, cpu, NULL, riscv_cpu->pc, sim_signalled,
+			   SIM_SIGILL);
+	}
+      break;
+    case MATCH_C_JALR | MATCH_C_ADD | MATCH_C_EBREAK:
+      /* Three encodings share this match value; disambiguate by mask.  */
+      switch (op->mask)
+	{
+	case MASK_C_ADD:
+	  TRACE_INSN (cpu, "c.add %s, %s; // %s += %s",
+		      rd_name, rs2_name, rd_name, rs2_name);
+	  store_rd (cpu, rd, riscv_cpu->regs[rd] + riscv_cpu->regs[rs2]);
+	  break;
+	case MASK_C_JALR:
+	  TRACE_INSN (cpu, "c.jalr %s, %s;",
+		      riscv_gpr_names_abi[REG_RA], rd_name);
+	  store_rd (cpu, REG_RA, riscv_cpu->pc + 2);
+	  pc = riscv_cpu->regs[rd];
+	  TRACE_BRANCH (cpu, "to %#" PRIxTW, pc);
+	  break;
+	case MASK_C_EBREAK:
+	  TRACE_INSN (cpu, "ebreak");
+	  sim_engine_halt (sd, cpu, NULL, riscv_cpu->pc, sim_stopped,
+			   SIM_SIGTRAP);
+	}
+      break;
+    case MATCH_C_BEQZ:
+      imm = EXTRACT_CBTYPE_IMM (iw);
+      TRACE_INSN (cpu, "c.beqz %s, %#" PRIxTW "; "
+		       "// if (%s == 0) goto %#" PRIxTW,
+		  rs1_c_name, imm, rs1_c_name, riscv_cpu->pc + imm);
+      /* regs[0] is x0, hard-wired to zero.  */
+      if (riscv_cpu->regs[rs1_c] == riscv_cpu->regs[0])
+	{
+	  pc = riscv_cpu->pc + imm;
+	  TRACE_BRANCH (cpu, "to %#" PRIxTW, pc);
+	}
+      break;
+    case MATCH_C_BNEZ:
+      imm = EXTRACT_CBTYPE_IMM (iw);
+      TRACE_INSN (cpu, "c.bnez %s, %#" PRIxTW "; "
+		       "// if (%s != 0) goto %#" PRIxTW,
+		  rs1_c_name, imm, rs1_c_name, riscv_cpu->pc + imm);
+      if (riscv_cpu->regs[rs1_c] != riscv_cpu->regs[0])
+	{
+	  pc = riscv_cpu->pc + imm;
+	  TRACE_BRANCH (cpu, "to %#" PRIxTW, pc);
+	}
+      break;
+    case MATCH_C_LWSP:
+      imm = EXTRACT_CITYPE_LWSP_IMM (iw);
+      TRACE_INSN (cpu, "c.lwsp %s, %" PRIiTW "(sp);",
+		  rd_name, imm);
+      store_rd (cpu, rd, EXTEND32 (
+	sim_core_read_unaligned_4 (cpu, riscv_cpu->pc, read_map,
+				   riscv_cpu->regs[REG_SP] + imm)));
+      break;
+    case MATCH_C_LW:
+      imm = EXTRACT_CLTYPE_LW_IMM (iw);
+      /* CL format: rs2_c is the destination (rd'), rs1_c the base.  */
+      TRACE_INSN (cpu, "c.lw %s, %" PRIiTW "(%s);",
+		  rs2_c_name, imm, rs1_c_name);
+      store_rd (cpu, rs2_c, EXTEND32 (
+	sim_core_read_unaligned_4 (cpu, riscv_cpu->pc, read_map,
+				   riscv_cpu->regs[rs1_c] + imm)));
+      break;
+    case MATCH_C_SWSP:
+      imm = EXTRACT_CSSTYPE_SWSP_IMM (iw);
+      TRACE_INSN (cpu, "c.swsp %s, %" PRIiTW "(sp);",
+		  rs2_name, imm);
+      sim_core_write_unaligned_4 (cpu, riscv_cpu->pc, write_map,
+				  riscv_cpu->regs[REG_SP] + imm,
+				  riscv_cpu->regs[rs2]);
+      break;
+    case MATCH_C_SW:
+      /* CS-format stores use the same immediate layout as CL loads.  */
+      imm = EXTRACT_CLTYPE_LW_IMM (iw);
+      TRACE_INSN (cpu, "c.sw %s, %" PRIiTW "(%s);",
+		  rs2_c_name, imm, rs1_c_name);
+      sim_core_write_unaligned_4 (cpu, riscv_cpu->pc, write_map,
+				  riscv_cpu->regs[rs1_c] + imm,
+				  riscv_cpu->regs[rs2_c]);
+      break;
+    case MATCH_C_ADDI:
+      imm = EXTRACT_CITYPE_IMM (iw);
+      TRACE_INSN (cpu, "c.addi %s, %s, %#" PRIxTW "; // %s += %#" PRIxTW,
+		  rd_name, rd_name, imm, rd_name, imm);
+      store_rd (cpu, rd, riscv_cpu->regs[rd] + imm);
+      break;
+    case MATCH_C_LUI:
+      imm = EXTRACT_CITYPE_LUI_IMM (iw);
+      TRACE_INSN (cpu, "c.lui %s, %#" PRIxTW ";",
+		  rd_name, imm);
+      store_rd (cpu, rd, imm);
+      break;
+    case MATCH_C_LI:
+      imm = EXTRACT_CITYPE_IMM (iw);
+      TRACE_INSN (cpu, "c.li %s, %#" PRIxTW "; // %s = %#" PRIxTW,
+		  rd_name, imm, rd_name, imm);
+      store_rd (cpu, rd, imm);
+      break;
+    case MATCH_C_ADDI4SPN:
+      imm = EXTRACT_CIWTYPE_ADDI4SPN_IMM (iw);
+      TRACE_INSN (cpu, "c.addi4spn %s, %" PRIiTW "; // %s = sp + %" PRIiTW,
+		  rs2_c_name, imm, rs2_c_name, imm);
+      store_rd (cpu, rs2_c, riscv_cpu->regs[REG_SP] + imm);
+      break;
+    case MATCH_C_ADDI16SP:
+      imm = EXTRACT_CITYPE_ADDI16SP_IMM (iw);
+      TRACE_INSN (cpu, "c.addi16sp %s, %" PRIiTW "; // %s = sp + %" PRIiTW,
+		  rd_name, imm, rd_name, imm);
+      store_rd (cpu, rd, riscv_cpu->regs[REG_SP] + imm);
+      break;
+    case MATCH_C_SUB:
+      TRACE_INSN (cpu, "c.sub %s, %s; // %s = %s - %s",
+		  rs1_c_name, rs2_c_name, rs1_c_name, rs1_c_name, rs2_c_name);
+      store_rd (cpu, rs1_c, riscv_cpu->regs[rs1_c] - riscv_cpu->regs[rs2_c]);
+      break;
+    case MATCH_C_AND:
+      TRACE_INSN (cpu, "c.and %s, %s; // %s = %s & %s",
+		  rs1_c_name, rs2_c_name, rs1_c_name, rs1_c_name, rs2_c_name);
+      store_rd (cpu, rs1_c, riscv_cpu->regs[rs1_c] & riscv_cpu->regs[rs2_c]);
+      break;
+    case MATCH_C_OR:
+      TRACE_INSN (cpu, "c.or %s, %s; // %s = %s | %s",
+		  rs1_c_name, rs2_c_name, rs1_c_name, rs1_c_name, rs2_c_name);
+      store_rd (cpu, rs1_c, riscv_cpu->regs[rs1_c] | riscv_cpu->regs[rs2_c]);
+      break;
+    case MATCH_C_XOR:
+      TRACE_INSN (cpu, "c.xor %s, %s; // %s = %s ^ %s",
+		  rs1_c_name, rs2_c_name, rs1_c_name, rs1_c_name, rs2_c_name);
+      store_rd (cpu, rs1_c, riscv_cpu->regs[rs1_c] ^ riscv_cpu->regs[rs2_c]);
+      break;
+    case MATCH_C_SLLI | MATCH_C_SLLI64:
+      if (op->mask == MASK_C_SLLI64)
+	{
+	  /* Reserved for custom use.  */
+	  TRACE_INSN (cpu, "UNHANDLED INSN: %s", op->name);
+	  sim_engine_halt (sd, cpu, NULL, riscv_cpu->pc, sim_signalled,
+			   SIM_SIGILL);
+	  break;
+	}
+      imm = EXTRACT_CITYPE_IMM (iw);
+      TRACE_INSN (cpu, "c.slli %s, %" PRIiTW "; // %s = %s << %#" PRIxTW,
+		  rd_name, imm, rd_name, rd_name, imm);
+      store_rd (cpu, rd, riscv_cpu->regs[rd] << imm);
+      break;
+    case MATCH_C_SRLI | MATCH_C_SRLI64:
+      if (op->mask == MASK_C_SRLI64)
+	{
+	  /* Reserved for custom use.  */
+	  TRACE_INSN (cpu, "UNHANDLED INSN: %s", op->name);
+	  sim_engine_halt (sd, cpu, NULL, riscv_cpu->pc, sim_signalled,
+			   SIM_SIGILL);
+	  break;
+	}
+      imm = EXTRACT_CITYPE_IMM (iw);
+      TRACE_INSN (cpu, "c.srli %s, %" PRIiTW "; // %s = %s >> %#" PRIxTW,
+		  rs1_c_name, imm, rs1_c_name, rs1_c_name, imm);
+      if (RISCV_XLEN (cpu) == 32)
+	{
+	  /* Match the c.srai handling: shift amounts of 32 or more are
+	     reserved on RV32, and would be an undefined C shift here.  */
+	  if (imm > 0x1f)
+	    sim_engine_halt (sd, cpu, NULL, riscv_cpu->pc, sim_signalled,
+			     SIM_SIGILL);
+	  store_rd (cpu, rs1_c, ((uint32_t) riscv_cpu->regs[rs1_c]) >> imm);
+	}
+      else
+	store_rd (cpu, rs1_c, ((uint64_t) riscv_cpu->regs[rs1_c]) >> imm);
+      break;
+    case MATCH_C_SRAI | MATCH_C_SRAI64:
+      if (op->mask == MASK_C_SRAI64)
+	{
+	  /* Reserved for custom use.  */
+	  TRACE_INSN (cpu, "UNHANDLED INSN: %s", op->name);
+	  sim_engine_halt (sd, cpu, NULL, riscv_cpu->pc, sim_signalled,
+			   SIM_SIGILL);
+	  break;
+	}
+      imm = EXTRACT_CITYPE_IMM (iw);
+      TRACE_INSN (cpu, "c.srai %s, %" PRIiTW "; // %s = %s >> %#" PRIxTW,
+		  rs1_c_name, imm, rs1_c_name, rs1_c_name, imm);
+      if (RISCV_XLEN (cpu) == 32)
+	{
+	  if (imm > 0x1f)
+	    sim_engine_halt (sd, cpu, NULL, riscv_cpu->pc, sim_signalled,
+			     SIM_SIGILL);
+	  tmp = ashiftrt (riscv_cpu->regs[rs1_c], imm);
+	}
+      else
+	tmp = ashiftrt64 (riscv_cpu->regs[rs1_c], imm);
+      /* The destination of c.srai is the compressed rs1' field.  (For
+	 this encoding the raw RD field happens to decode to the same
+	 register, but be explicit and match c.srli above.)  */
+      store_rd (cpu, rs1_c, tmp);
+      break;
+    case MATCH_C_ANDI:
+      imm = EXTRACT_CITYPE_IMM (iw);
+      TRACE_INSN (cpu, "c.andi %s, %" PRIiTW "; // %s = %s & %#" PRIxTW,
+		  rs1_c_name, imm, rs1_c_name, rs1_c_name, imm);
+      store_rd (cpu, rs1_c, riscv_cpu->regs[rs1_c] & imm);
+      break;
+    case MATCH_C_ADDW:
+      TRACE_INSN (cpu, "c.addw %s, %s; // %s = %s + %s",
+		  rs1_c_name, rs2_c_name, rs1_c_name, rs1_c_name, rs2_c_name);
+      RISCV_ASSERT_RV64 (cpu, "insn: %s", op->name);
+      store_rd (cpu, rs1_c,
+		EXTEND32 (riscv_cpu->regs[rs1_c] + riscv_cpu->regs[rs2_c]));
+      break;
+    case MATCH_C_SUBW:
+      TRACE_INSN (cpu, "c.subw %s, %s; // %s = %s - %s",
+		  rs1_c_name, rs2_c_name, rs1_c_name, rs1_c_name, rs2_c_name);
+      RISCV_ASSERT_RV64 (cpu, "insn: %s", op->name);
+      store_rd (cpu, rs1_c,
+		EXTEND32 (riscv_cpu->regs[rs1_c] - riscv_cpu->regs[rs2_c]));
+      break;
+    case MATCH_C_LDSP:
+      imm = EXTRACT_CITYPE_LDSP_IMM (iw);
+      TRACE_INSN (cpu, "c.ldsp %s, %" PRIiTW "(sp);",
+		  rd_name, imm);
+      RISCV_ASSERT_RV64 (cpu, "insn: %s", op->name);
+      store_rd (cpu, rd,
+		sim_core_read_unaligned_8 (cpu, riscv_cpu->pc, read_map,
+					   riscv_cpu->regs[REG_SP] + imm));
+      break;
+    case MATCH_C_LD:
+      imm = EXTRACT_CLTYPE_LD_IMM (iw);
+      /* CL format: rs2_c is the destination (rd'), rs1_c the base --
+	 same operand order as c.lw above.  */
+      TRACE_INSN (cpu, "c.ld %s, %" PRIiTW "(%s);",
+		  rs2_c_name, imm, rs1_c_name);
+      RISCV_ASSERT_RV64 (cpu, "insn: %s", op->name);
+      store_rd (cpu, rs2_c,
+		sim_core_read_unaligned_8 (cpu, riscv_cpu->pc, read_map,
+					   riscv_cpu->regs[rs1_c] + imm));
+      break;
+    case MATCH_C_SDSP:
+      imm = EXTRACT_CSSTYPE_SDSP_IMM (iw);
+      TRACE_INSN (cpu, "c.sdsp %s, %" PRIiTW "(sp);",
+		  rs2_name, imm);
+      RISCV_ASSERT_RV64 (cpu, "insn: %s", op->name);
+      sim_core_write_unaligned_8 (cpu, riscv_cpu->pc, write_map,
+				  riscv_cpu->regs[REG_SP] + imm,
+				  riscv_cpu->regs[rs2]);
+      break;
+    case MATCH_C_SD:
+      imm = EXTRACT_CLTYPE_LD_IMM (iw);
+      TRACE_INSN (cpu, "c.sd %s, %" PRIiTW "(%s);",
+		  rs2_c_name, imm, rs1_c_name);
+      RISCV_ASSERT_RV64 (cpu, "insn: %s", op->name);
+      sim_core_write_unaligned_8 (cpu, riscv_cpu->pc, write_map,
+				  riscv_cpu->regs[rs1_c] + imm,
+				  riscv_cpu->regs[rs2_c]);
+      break;
+    default:
+      TRACE_INSN (cpu, "UNHANDLED INSN: %s", op->name);
+      sim_engine_halt (sd, cpu, NULL, riscv_cpu->pc, sim_signalled,
+		       SIM_SIGILL);
+    }
+
+  return pc;
+}
+
static sim_cia
execute_one (SIM_CPU *cpu, unsigned_word iw, const struct riscv_opcode *op)
{
@@ -1214,6 +1525,8 @@ execute_one (SIM_CPU *cpu, unsigned_word iw, const struct riscv_opcode *op)
case INSN_CLASS_M:
case INSN_CLASS_ZMMUL:
return execute_m (cpu, iw, op);
+ case INSN_CLASS_C:
+ return execute_c (cpu, iw, op);
default:
TRACE_INSN (cpu, "UNHANDLED EXTENSION: %d", op->insn_class);
sim_engine_halt (sd, cpu, NULL, riscv_cpu->pc, sim_signalled, SIM_SIGILL);
@@ -1239,17 +1552,10 @@ void step_once (SIM_CPU *cpu)
iw = sim_core_read_aligned_2 (cpu, pc, exec_map, pc);
- /* Reject non-32-bit opcodes first. */
len = riscv_insn_length (iw);
- if (len != 4)
- {
- sim_io_printf (sd, "sim: bad insn len %#x @ %#" PRIxTA ": %#" PRIxTW "\n",
- len, pc, iw);
- sim_engine_halt (sd, cpu, NULL, pc, sim_signalled, SIM_SIGILL);
- }
-
- iw |= ((unsigned_word) sim_core_read_aligned_2 (
- cpu, pc, exec_map, pc + 2) << 16);
+ if (len == 4)
+ iw |= ((unsigned_word) sim_core_read_aligned_2
+ (cpu, pc, exec_map, pc + 2) << 16);
TRACE_CORE (cpu, "0x%08" PRIxTW, iw);
new file mode 100644
@@ -0,0 +1,110 @@
+# Basic load store tests.
+# mach: riscv
+
+.include "testutils.inc"
+
+ .data
+ .align 4
+_data:
+ .word 1234
+ .word 0
+
+ start
+ la a0, _data
+
+ # Test load-store instructions.
+ .option push
+ .option arch, +c
+ c.lw a1,0(a0)
+ c.sw a1,4(a0)
+ c.lw a2,4(a0)
+ .option pop
+
+ li a5,1234
+ bne a1,a5,test_fail
+ bne a2,a5,test_fail
+
+ # Test basic arithmetic.
+ .option push
+ .option arch, +c
+ c.li a0,0
+ c.li a1,1
+ c.addi a0,1
+ c.addi a0,-1
+ c.addw a0,a1
+ c.subw a0,a1
+ .option pop
+
+ li a5,1
+ bne a0,x0,test_fail
+ bne a1,a5,test_fail
+
+ # Test logical operations.
+ .option push
+ .option arch, +c
+ c.li a0,7
+ c.li a1,7
+ c.li a2,4
+ c.li a3,3
+ c.li a4,3
+ c.andi a0,3
+ c.and a1,a0
+ c.or a2,a3
+ c.xor a4,a4
+ .option pop
+
+ li a5,3
+ bne a0,a5,test_fail
+ bne a1,a5,test_fail
+ bne a4,x0,test_fail
+ li a5,7
+ bne a2,a5,test_fail
+
+ # Test shift operations.
+ .option push
+ .option arch, +c
+ c.li a0,4
+ c.li a1,4
+ c.slli a0,1
+ c.srli a1,1
+ .option pop
+
+ li a5,8
+ bne a0,a5,test_fail
+ li a5,2
+ bne a1,a5,test_fail
+
+ # Test jump instruction.
+ .option push
+ .option arch, +c
+ c.j 1f
+ .option pop
+
+ j test_fail
+1:
+ la a0,2f
+
+ # Test jump register instruction.
+ .option push
+ .option arch, +c
+ c.jr a0
+ .option pop
+
+ j test_fail
+
+2:
+ # Test branch instruction.
+ .option push
+ .option arch, +c
+ c.li a0,1
+ c.beqz a0,test_fail
+ c.li a0,0
+ c.bnez a0,test_fail
+ .option pop
+
+test_pass:
+ pass
+ fail
+
+test_fail:
+ fail