[00/29] Add Support for AARCH64:ILP32

Message ID: mvmoal78poi.fsf@hawking.suse.de
State: New, archived

Commit Message

Andreas Schwab May 26, 2015, 12:54 p.m. UTC
  "Pinski, Andrew" <Andrew.Pinski@caviumnetworks.com> writes:

> The kernel UAPI or the ELF ABI?

The C ABI.

> You can find a "beta" version of the ELF ABI on ARM's web site. I suspect there are a few assembly files that need to be changed to support ILP32; memcpy.S is one of them. I can't dig up the patch to fix them tonight, though.

Something like this?
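
For context: under ILP32 on AArch64, pointers and long are 32 bits, so pointer and size arguments arrive in the W halves of the argument registers, and the upper 32 bits of the corresponding X registers are not guaranteed to be zero. A 64-bit string routine that forms addresses from the full X registers therefore has to zero-extend its arguments on entry, which is what the patch below does. A minimal sketch of the pattern (the function name is made up for illustration; "mov wN, wN" is used because writing a W register clears bits 63:32 of the X register):

	.text
	.globl	zext_args_example
	.type	zext_args_example, %function
zext_args_example:
#ifdef __ILP32__
	mov	w0, w0		/* pointer argument: clear upper half of x0 */
	mov	w1, w1		/* size argument: likewise */
#endif
	ldrb	w2, [x0]	/* x0 is now safe to use as a 64-bit address */
	ret
	.size	zext_args_example, .-zext_args_example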
  

Comments

Pinski, Andrew May 26, 2015, 1:02 p.m. UTC | #1
> On May 26, 2015, at 8:54 PM, Andreas Schwab <schwab@suse.de> wrote:
> 
> "Pinski, Andrew" <Andrew.Pinski@caviumnetworks.com> writes:
> 
>> The kernel UAPI or the ELF ABI?
> 
> The C ABI.
> 
>> You can find a "beta" version of the ELF ABI on ARM's web site. I suspect there are a few assembly files that need to be changed to support ILP32; memcpy.S is one of them. I can't dig up the patch to fix them tonight, though.
> 
> Something like this?

Yes, exactly like that.

Thanks,
Andrew

> 
> diff --git a/sysdeps/aarch64/memcmp.S b/sysdeps/aarch64/memcmp.S
> index b0b34fa..51dcba9 100644
> --- a/sysdeps/aarch64/memcmp.S
> +++ b/sysdeps/aarch64/memcmp.S
> @@ -47,6 +47,11 @@
>  #define mask		x13
> 
>  ENTRY_ALIGN (memcmp, 6)
> +#ifdef __ILP32__
> +	uxtw	src1, src1
> +	uxtw	src2, src2
> +	uxtw	limit, limit
> +#endif
>  	cbz	limit, L(ret0)
>  	eor	tmp1, src1, src2
>  	tst	tmp1, #7
> diff --git a/sysdeps/aarch64/memcpy.S b/sysdeps/aarch64/memcpy.S
> index b3d550e..63b3e2d 100644
> --- a/sysdeps/aarch64/memcpy.S
> +++ b/sysdeps/aarch64/memcpy.S
> @@ -47,6 +47,11 @@
> 
>  ENTRY_ALIGN (memcpy, 6)
> 
> +#ifdef __ILP32__
> +	uxtw	dstin, dstin
> +	uxtw	src, src
> +	uxtw	count, count
> +#endif
>  	mov	dst, dstin
>  	cmp	count, #64
>  	b.ge	L(cpy_not_short)
> diff --git a/sysdeps/aarch64/memmove.S b/sysdeps/aarch64/memmove.S
> index 8d0b328..82b571a 100644
> --- a/sysdeps/aarch64/memmove.S
> +++ b/sysdeps/aarch64/memmove.S
> @@ -47,6 +47,11 @@
> 
>  ENTRY_ALIGN (memmove, 6)
> 
> +#ifdef __ILP32__
> +	uxtw	dstin, dstin
> +	uxtw	src, src
> +	uxtw	count, count
> +#endif
>  	cmp	dstin, src
>  	b.lo	L(downwards)
>  	add	tmp1, src, count
> diff --git a/sysdeps/aarch64/memset.S b/sysdeps/aarch64/memset.S
> index 816640a..2ea2ace 100644
> --- a/sysdeps/aarch64/memset.S
> +++ b/sysdeps/aarch64/memset.S
> @@ -53,6 +53,10 @@
> 
>  ENTRY_ALIGN (__memset, 6)
> 
> +#ifdef __ILP32__
> +	uxtw	dstin, dstin
> +	uxtw	count, count
> +#endif
>  	mov	dst, dstin		/* Preserve return value.  */
>  	ands	A_lw, val, #255
>  #ifndef DONT_USE_DC
> -- 
> 2.4.1
> 
> 
> Andreas.
> 
> -- 
> Andreas Schwab, SUSE Labs, schwab@suse.de
> GPG Key fingerprint = 0196 BAD8 1CE9 1970 F4BE  1748 E4D4 88E3 0EEA B9D7
> "And now for something completely different."
  
Andreas Schwab May 26, 2015, 1:22 p.m. UTC | #2
"Pinski, Andrew" <Andrew.Pinski@caviumnetworks.com> writes:

>> On May 26, 2015, at 8:54 PM, Andreas Schwab <schwab@suse.de> wrote:
>> 
>> "Pinski, Andrew" <Andrew.Pinski@caviumnetworks.com> writes:
>> 
>>> The kernel UAPI or the ELF ABI?
>> 
>> The C ABI.
>> 
>>> You can find a "beta" version of the ELF ABI on ARM's web site. I suspect there are a few assembly files that need to be changed to support ILP32; memcpy.S is one of them. I can't dig up the patch to fix them tonight, though.
>> 
>> Something like this?
>
> Yes exactly like that. 

Well, not exactly, since I got the syntax wrong...

Andreas.
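
The "syntax wrong" here is presumably the uxtw operands: src1, dstin, and the other names are #defined to X registers, so the posted lines expand to, e.g., "uxtw x0, x0", but a UXTW-style zero-extend needs a W-register source (and not every assembler accepts a standalone uxtw mnemonic at all). A minimal corrected sketch, assuming that reading of the error; either instruction alone does the job:

	/* Zero-extend the low 32 bits of x0.  Writing a W register
	   clears bits 63:32 of the corresponding X register, so a
	   plain mov suffices; ubfx spells the same operation as an
	   explicit unsigned bit-field extract.  */
	mov	w0, w0
	ubfx	x0, x0, #0, #32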
  

Patch

diff --git a/sysdeps/aarch64/memcmp.S b/sysdeps/aarch64/memcmp.S
index b0b34fa..51dcba9 100644
--- a/sysdeps/aarch64/memcmp.S
+++ b/sysdeps/aarch64/memcmp.S
@@ -47,6 +47,11 @@ 
 #define mask		x13
 
 ENTRY_ALIGN (memcmp, 6)
+#ifdef __ILP32__
+	uxtw	src1, src1
+	uxtw	src2, src2
+	uxtw	limit, limit
+#endif
 	cbz	limit, L(ret0)
 	eor	tmp1, src1, src2
 	tst	tmp1, #7
diff --git a/sysdeps/aarch64/memcpy.S b/sysdeps/aarch64/memcpy.S
index b3d550e..63b3e2d 100644
--- a/sysdeps/aarch64/memcpy.S
+++ b/sysdeps/aarch64/memcpy.S
@@ -47,6 +47,11 @@ 
 
 ENTRY_ALIGN (memcpy, 6)
 
+#ifdef __ILP32__
+	uxtw	dstin, dstin
+	uxtw	src, src
+	uxtw	count, count
+#endif
 	mov	dst, dstin
 	cmp	count, #64
 	b.ge	L(cpy_not_short)
diff --git a/sysdeps/aarch64/memmove.S b/sysdeps/aarch64/memmove.S
index 8d0b328..82b571a 100644
--- a/sysdeps/aarch64/memmove.S
+++ b/sysdeps/aarch64/memmove.S
@@ -47,6 +47,11 @@ 
 
 ENTRY_ALIGN (memmove, 6)
 
+#ifdef __ILP32__
+	uxtw	dstin, dstin
+	uxtw	src, src
+	uxtw	count, count
+#endif
 	cmp	dstin, src
 	b.lo	L(downwards)
 	add	tmp1, src, count
diff --git a/sysdeps/aarch64/memset.S b/sysdeps/aarch64/memset.S
index 816640a..2ea2ace 100644
--- a/sysdeps/aarch64/memset.S
+++ b/sysdeps/aarch64/memset.S
@@ -53,6 +53,10 @@ 
 
 ENTRY_ALIGN (__memset, 6)
 
+#ifdef __ILP32__
+	uxtw	dstin, dstin
+	uxtw	count, count
+#endif
 	mov	dst, dstin		/* Preserve return value.  */
 	ands	A_lw, val, #255
 #ifndef DONT_USE_DC