RISC-V: Fix memcpy() for GCC 13
Commit Message
GCC 13 does not provide the __riscv_misaligned_* built-in macros; they are
only available with GCC 14 or later. Test for the absence of
__riscv_misaligned_fast instead, so that an always-correct memcpy()
implementation is also selected for GCC 13.
Signed-off-by: Sebastian Huber <sebastian.huber@embedded-brains.de>
---
newlib/libc/machine/riscv/memcpy.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
Comments
OK, thanks :)
On Wed, Jul 2, 2025 at 8:41 AM Sebastian Huber
<sebastian.huber@embedded-brains.de> wrote:
>
> GCC 13 does not define the __riscv_misaligned_* builtin defines. They are
> supported by GCC 14 or later. Test for __riscv_misaligned_fast to select an
> always correct memcpy() implementation for GCC 13.
>
> Signed-off-by: Sebastian Huber <sebastian.huber@embedded-brains.de>
> ---
> newlib/libc/machine/riscv/memcpy.c | 4 ++--
> 1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/newlib/libc/machine/riscv/memcpy.c b/newlib/libc/machine/riscv/memcpy.c
> index 5d6b2f301..a27e0ecb1 100644
> --- a/newlib/libc/machine/riscv/memcpy.c
> +++ b/newlib/libc/machine/riscv/memcpy.c
> @@ -30,7 +30,7 @@ __libc_memcpy_bytewise (unsigned char *dst, const unsigned char *src,
> *dst++ = *src++;
> }
>
> -#if defined(__riscv_misaligned_slow) || defined(__riscv_misaligned_avoid)
> +#ifndef __riscv_misaligned_fast
> static uintxlen_t
> __libc_load_xlen (const void *src)
> {
> @@ -75,7 +75,7 @@ memcpy (void *__restrict aa, const void *__restrict bb, size_t n)
> * This uses only one aligned store for every four (or eight for XLEN == 64)
> * bytes of data.
> */
> -#if defined(__riscv_misaligned_slow) || defined(__riscv_misaligned_avoid)
> +#ifndef __riscv_misaligned_fast
> if (unlikely ((((uintptr_t)a & msk) != ((uintptr_t)b & msk))))
> {
> size_t dst_pad = (uintptr_t)a & msk;
> --
> 2.43.0
>
@@ -30,7 +30,7 @@ __libc_memcpy_bytewise (unsigned char *dst, const unsigned char *src,
*dst++ = *src++;
}
-#if defined(__riscv_misaligned_slow) || defined(__riscv_misaligned_avoid)
+#ifndef __riscv_misaligned_fast
static uintxlen_t
__libc_load_xlen (const void *src)
{
@@ -75,7 +75,7 @@ memcpy (void *__restrict aa, const void *__restrict bb, size_t n)
* This uses only one aligned store for every four (or eight for XLEN == 64)
* bytes of data.
*/
-#if defined(__riscv_misaligned_slow) || defined(__riscv_misaligned_avoid)
+#ifndef __riscv_misaligned_fast
if (unlikely ((((uintptr_t)a & msk) != ((uintptr_t)b & msk))))
{
size_t dst_pad = (uintptr_t)a & msk;