@@ -3145,7 +3145,7 @@ fi
if test "$enable_gprofng" = "yes"; then
case "${target}" in
- x86_64-*-linux* | i?86-*-linux* | aarch64-*-linux*)
+ x86_64-*-linux* | i?86-*-linux* | aarch64-*-linux* | riscv64-*-linux*)
configdirs="$configdirs gprofng"
;;
esac
@@ -412,7 +412,7 @@ enable_gprofng=$enableval,
enable_gprofng=yes)
if test "$enable_gprofng" = "yes"; then
case "${target}" in
- x86_64-*-linux* | i?86-*-linux* | aarch64-*-linux*)
+ x86_64-*-linux* | i?86-*-linux* | aarch64-*-linux* | riscv64-*-linux*)
configdirs="$configdirs gprofng"
;;
esac
@@ -2754,6 +2754,9 @@ core_pcbe_init (void)
return 0;
case X86_VENDOR_Intel:
break;
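+ /* RISC-V vendor ids are not supported by this PCBE; they fall through to
+    the default (unsupported) case.  */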
+ case ANDES_VENDOR_ID:
+ case SIFIVE_VENDOR_ID:
+ case THEAD_VENDOR_ID:
default:
return -1;
}
@@ -2915,7 +2918,7 @@ core_pcbe_impl_name (void)
static const char *
core_pcbe_cpuref (void)
{
-#if defined(__aarch64__)
+#if defined(__aarch64__) || defined(__riscv)
return "";
#elif defined(__i386__) || defined(__x86_64)
switch (cpuid_getmodel ())
@@ -42,6 +42,11 @@ __get_cpuid (unsigned int op ATTRIBUTE_UNUSED, unsigned int *eax,
Tprintf (DBG_LT0, "cpuid.c:%d read_cpuid_id() MIDR_EL1=0x%016x\n", __LINE__, *eax);
return res;
}
+#elif defined(__riscv)
+#include <sched.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <asm/hwprobe.h>
#endif
/*
@@ -104,7 +109,7 @@ my_cpuid (unsigned int op, cpuid_regs_t *regs)
TprintfT (DBG_LT1, "my_cpuid: __get_cpuid(0x%x, 0x%x, 0x%x, 0x%x, 0x%x) returns %d\n",
op, regs->eax, regs->ebx, regs->ecx, regs->edx, ret);
return ret;
-}
+}
#endif
static cpuid_info_t *
@@ -180,8 +185,30 @@ get_cpuid_info ()
cpi->cpi_model += CPI_MODEL_XTD (regs.eax) << 4;
break;
}
+#elif defined(__riscv)
+#ifndef __NR_riscv_hwprobe
+  /* riscv_hwprobe(2) needs Linux 6.4+ kernel headers; without it the vendor
+     id cannot be read from user space here.  */
+  cpi->cpi_vendor = 0;
+  cpi->cpi_family = 0;
+  cpi->cpi_model = 0;
+#else
+  /* Read the vendor id (mvendorid CSR) of CPU 0 via the riscv_hwprobe(2)
+     syscall.  */
+  struct riscv_hwprobe res;
+  cpu_set_t cpu_set;
+  res.key = RISCV_HWPROBE_KEY_MVENDORID;
+  CPU_ZERO (&cpu_set);
+  CPU_SET (0, &cpu_set);
+  long ret = syscall (__NR_riscv_hwprobe, &res, 1, sizeof (cpu_set),
+		      &cpu_set, 0);
+  cpi->cpi_vendor = ret == 0 ? res.value : 0;
+  cpi->cpi_family = 0;
+  cpi->cpi_model = 0;
+#endif
#endif
- return cpi;
+ return cpi;
}
static inline uint_t
@@ -32,6 +32,7 @@
*/
#define ARCH(x) TOK_A_##x(ARCH)
#define TOK_A_Aarch64(x) x##_Aarch64
+#define TOK_A_RISCV(x) x##_RISCV
#define TOK_A_SPARC(x) x##_SPARC
#define TOK_A_Intel(x) x##_Intel
@@ -45,11 +46,13 @@
#define ARCH_Intel 1
#elif defined(__aarch64__)
#define ARCH_Aarch64 1
+#elif defined(riscv) || defined(__riscv)
+#define ARCH_RISCV 1
#else
#error "Undefined platform"
#endif
-#if defined(__sparcv9) || defined(__x86_64) || defined(__aarch64__)
+#if defined(__sparcv9) || defined(__x86_64) || defined(__aarch64__) || defined(__riscv)
#define WSIZE_64 1
#else
#define WSIZE_32 1
@@ -111,6 +111,13 @@ enum {
ARM_CPU_IMP_QCOM = 0x51
};
+// RISC-V vendor ids (mvendorid values) from arch/riscv/include/asm/vendorid_list.h
+enum {
+  ANDES_VENDOR_ID = 0x31e,
+  SIFIVE_VENDOR_ID = 0x489,
+  THEAD_VENDOR_ID = 0x5b7
+};
+
#define AARCH64_VENDORSTR_ARM "ARM"
/* strings below must match those returned by cpc_getcpuver() */
@@ -98,7 +98,7 @@ typedef struct { /* supplementary data fields */
#define HW_INTERVAL_MAX UINT64_MAX
#define HW_INTERVAL_PRESET(x) (HW_INTERVAL_MAX - ((uint64_t)(x) - 1))
-#define HW_INTERVAL_TYPE(x) ((uint64_t) (x)
+#define HW_INTERVAL_TYPE(x) ((uint64_t) (x))
/* parsing */
#define HWCFUNCS_MAX_ATTRS 20
@@ -15788,6 +15788,10 @@ build_src=
build_src=true
build_collector=true
;;
+ riscv*-*-linux*)
+ build_src=true
+ build_collector=true
+ ;;
esac
# Check whether --enable-gprofng-tools was given.
if test "${enable_gprofng_tools+set}" = set; then :
@@ -63,6 +63,10 @@ build_src=
build_src=true
build_collector=true
;;
+ riscv*-*-linux*)
+ build_src=true
+ build_collector=true
+ ;;
esac
AC_ARG_ENABLE(gprofng-tools,
AS_HELP_STRING([--disable-gprofng-tools], [do not build gprofng/src directory]),
@@ -84,6 +84,16 @@ typedef struct MHwcntr_packet
(ucp)->uc_mcontext.regs[CONTEXT_PC] = (greg_t)(funcp); \
(ucp)->uc_mcontext.regs[CONTEXT_SP] = 0; \
(ucp)->uc_mcontext.regs[CONTEXT_FP] = 0;
+
+#elif ARCH(RISCV)
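+/* RISC-V: REG_PC, 2 (sp/x2) and 8 (s0/fp/x8) index uc_mcontext.__gregs.  */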
+#define CONTEXT_PC REG_PC
+#define CONTEXT_FP 8
+#define CONTEXT_SP 2
+#define SETFUNCTIONCONTEXT(ucp,funcp) \
+ (ucp)->uc_mcontext.__gregs[CONTEXT_PC] = (greg_t)(funcp); \
+ (ucp)->uc_mcontext.__gregs[CONTEXT_FP] = 0; \
+ (ucp)->uc_mcontext.__gregs[CONTEXT_SP] = 0;
+
#endif /* ARCH() */
#endif
@@ -91,7 +91,7 @@ __collector_gettid ()
#endif
__asm__ __volatile__(syscall_instr
: "=a" (r) : "0" (__NR_gettid)
- : syscall_clobber);
+ : syscall_clobber);
#else
r = syscall (__NR_gettid);
#endif
@@ -1459,7 +1459,10 @@ __collector_util_init ()
else if ((ptr = dlvsym (libc, "fopen", "GLIBC_2.0")) != NULL)
__collector_util_funcs.fopen = ptr;
else
+ {
ptr = dlsym (libc, "fopen");
+ if (ptr) __collector_util_funcs.fopen = ptr;
+ }
if (__collector_util_funcs.fopen == NULL)
{
CALL_UTIL (fprintf)(stderr, "COL_ERROR_UTIL_INIT fopen: %s\n", dlerror ());
@@ -1475,7 +1478,10 @@ __collector_util_init ()
else if ((ptr = dlvsym (libc, "popen", "GLIBC_2.0")) != NULL)
__collector_util_funcs.popen = ptr;
else
+ {
ptr = dlsym (libc, "popen");
+ if (ptr) __collector_util_funcs.popen = ptr;
+ }
if (__collector_util_funcs.popen == NULL)
{
CALL_UTIL (fprintf)(stderr, "COL_ERROR_UTIL_INIT popen: %s\n", dlerror ());
@@ -1491,7 +1497,10 @@ __collector_util_init ()
else if ((ptr = dlvsym (libc, "fclose", "GLIBC_2.0")) != NULL)
__collector_util_funcs.fclose = ptr;
else
+ {
ptr = dlsym (libc, "fclose");
+ if (ptr) __collector_util_funcs.fclose = ptr;
+ }
if (__collector_util_funcs.fclose == NULL)
{
CALL_UTIL (fprintf)(stderr, "COL_ERROR_UTIL_INIT fclose: %s\n", dlerror ());
@@ -209,10 +209,10 @@ static __attribute__ ((always_inline)) inline uint32_t
__collector_cas_32 (volatile uint32_t *pdata, uint32_t old, uint32_t new)
{
uint32_t r;
- __asm__ __volatile__("lock; cmpxchgl %2, %1"
- : "=a" (r), "=m" (*pdata) : "r" (new),
- "a" (old), "m" (*pdata));
- return r;
+ __asm__ __volatile__("lock; cmpxchgl %2, %1"
+ : "=a" (r), "=m" (*pdata) : "r" (new),
+ "a" (old), "m" (*pdata));
+ return r;
}
/**
* This function enables a compare and swap operation to occur atomically.
@@ -270,7 +270,7 @@ __collector_cas_ptr (void *mem, void *cmp, void *new)
return r;
}
-#elif ARCH(Aarch64)
+#elif ARCH(Aarch64) || ARCH(RISCV)
static __attribute__ ((always_inline)) inline uint32_t
__collector_inc_32 (volatile uint32_t *ptr)
{
@@ -186,8 +186,14 @@ memory_error_func (int status ATTRIBUTE_UNUSED, bfd_vma addr ATTRIBUTE_UNUSED,
#define GET_PC(ctx) (((ucontext_t*)ctx)->uc_mcontext.regs[15])
#define GET_SP(ctx) (((ucontext_t*)ctx)->uc_mcontext.regs[13])
#define GET_FP(ctx) (((ucontext_t*)ctx)->uc_mcontext.regs[14])
+
+#elif ARCH(RISCV)
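+/* RISC-V __gregs[] layout: REG_PC is the pc slot; x2 (sp) is at index 2 and
+   x8 (s0, the frame pointer) at index 8.  */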
+#define GET_PC(ctx) (((ucontext_t*)ctx)->uc_mcontext.__gregs[REG_PC])
+#define GET_SP(ctx) (((ucontext_t*)ctx)->uc_mcontext.__gregs[2])
+#define GET_FP(ctx) (((ucontext_t*)ctx)->uc_mcontext.__gregs[8])
#endif /* ARCH() */
+
/*
* FILL_CONTEXT() for all platforms
* Could use getcontext() except:
@@ -230,13 +236,19 @@ memory_error_func (int status ATTRIBUTE_UNUSED, bfd_vma addr ATTRIBUTE_UNUSED,
context->uc_stack.ss_size = 0x100000; \
}
-#elif ARCH(Aarch64)
+#elif ARCH(Aarch64)
#define FILL_CONTEXT(context) \
{ CALL_UTIL (getcontext) (context); \
context->uc_mcontext.sp = (__u64) __builtin_frame_address(0); \
}
-#endif /* ARCH() */
+#elif ARCH(RISCV)
+/* Store the current frame address in the sp slot (x2), as on Aarch64.  */
+#define FILL_CONTEXT(context) \
+	{ CALL_UTIL (getcontext) (context); \
+	  context->uc_mcontext.__gregs[2] = (uint64_t) __builtin_frame_address(0); \
+	}
+
+#endif /* ARCH() */
static int
getByteInstruction (unsigned char *p)
@@ -94,6 +94,8 @@ Platform_t DbeSession::platform =
Sparc;
#elif ARCH(Aarch64)
Aarch64;
+#elif ARCH(RISCV)
+ RISCV;
#else // ARCH(Intel)
Intel;
#endif
@@ -208,6 +208,7 @@ Disasm::disasm_open ()
case Amd64:
need_swap_endian = (DbeSession::platform == Sparc);
break;
+ case RISCV:
case Sparcv8plus:
case Sparcv9:
case Sparc:
@@ -246,6 +247,7 @@ Disasm::disasm_open ()
dis_info.arch = bfd_arch_i386;
dis_info.mach = bfd_mach_x86_64;
break;
+ case RISCV:
case Sparcv8plus:
case Sparcv9:
case Sparc:
@@ -542,6 +542,8 @@ Experiment::ExperimentHandler::startElement (char*, char*, char *qName, Attribut
exp->platform = Intel;
else if (strcmp (str, "aarch64") == 0)
exp->platform = Aarch64;
+ else if (strcmp (str, "riscv64") == 0)
+ exp->platform = RISCV;
else
exp->platform = Sparc;
exp->need_swap_endian = (DbeSession::platform == Sparc) ?
@@ -332,6 +332,10 @@ collect::check_executable_arch (Elf *elf)
case EM_AARCH64:
is_64 = true;
break;
+#elif ARCH(RISCV)
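+ /* EM_RISCV is used for both rv32 and rv64; only 64-bit RISC-V objects are
+    handled here.  */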
+ case EM_RISCV:
+ is_64 = true;
+ break;
#endif
default:
return EXEC_ELF_ARCH;
@@ -51,9 +51,9 @@ extern const char *strsignal (int);
#endif
// _SC_CPUID_MAX is not available on 2.6/2.7
-#ifndef _SC_CPUID_MAX
-#define _SC_CPUID_MAX 517
-#endif
+
const char *get_fstype (char *);
@@ -74,15 +74,16 @@ Coll_Ctrl::Coll_Ctrl (int _interactive, bool _defHWC, bool _kernelHWC)
default_stem = strdup ("test");
/* get CPU count and processor clock rate */
+#ifndef _SC_CPUID_MAX
+ ncpus = sysconf (_SC_NPROCESSORS_CONF);
+ /* add 2048 to count, since on some systems CPUID does not start at zero */
+ ncpumax = ncpus + 2048;
+#else
ncpumax = sysconf (_SC_CPUID_MAX);
- if (ncpumax == -1)
- {
- ncpus = sysconf (_SC_NPROCESSORS_CONF);
- /* add 2048 to count, since on some systems CPUID does not start at zero */
- ncpumax = ncpus + 2048;
- }
- ncpus = 0;
- cpu_clk_freq = 0;
+#endif
+
// On Linux, read /proc/cpuinfo to get CPU count and clock rate
// Note that parsing is different on SPARC and x86
@@ -115,6 +116,9 @@ Coll_Ctrl::Coll_Ctrl (int _interactive, bool _defHWC, bool _kernelHWC)
#elif defined(__aarch64__)
asm volatile("mrs %0, cntfrq_el0" : "=r" (cpu_clk_freq));
+#elif defined(__riscv)
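+  /* There is no user-space clock-frequency register on RISC-V comparable to
+     Aarch64's cntfrq_el0; fall back to a fixed placeholder value.  */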
+ cpu_clk_freq = 1000;
+
#else
FILE *procf = fopen ("/proc/cpuinfo", "r");
if (procf != NULL)
@@ -42,7 +42,8 @@ enum Platform_t
Sparcv8plus,
Java,
Amd64,
- Aarch64
+ Aarch64,
+ RISCV
};
enum WSize_t
@@ -32,6 +32,8 @@
#define SPARC 1
#elif defined(__aarch64__)
#define Aarch64 1
+#elif defined(__riscv)
+#define RISCV 1
#else
#define Intel 1
#endif