[COMMITTED] aarch64: Use the L() macro for labels in memcmp

Message ID 20180202044809.2797-1-siddhesh@sourceware.org
State Committed
Headers

Commit Message

Siddhesh Poyarekar Feb. 2, 2018, 4:48 a.m. UTC
  The L() macro makes the assembly a bit more readable.  This change
produces no difference in the code generated on aarch64.

	* sysdeps/aarch64/memcmp.S: Use L() macro for labels.
---
 ChangeLog                |  2 ++
 sysdeps/aarch64/memcmp.S | 32 ++++++++++++++++----------------
 2 files changed, 18 insertions(+), 16 deletions(-)
  

Patch

diff --git a/ChangeLog b/ChangeLog
index 4b3be4ba84..6c6dac46e4 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,7 @@ 
 2018-02-02  Siddhesh Poyarekar  <siddhesh@sourceware.org>
 
+	* sysdeps/aarch64/memcmp.S: Use L() macro for labels.
+
 	* benchtests/bench-memcmp.c: Print json instead of plain text.
 
 	* benchtests/bench-memcmp.c (do_test): Call realloc_buf for
diff --git a/sysdeps/aarch64/memcmp.S b/sysdeps/aarch64/memcmp.S
index ccc795adeb..ecd12061b2 100644
--- a/sysdeps/aarch64/memcmp.S
+++ b/sysdeps/aarch64/memcmp.S
@@ -44,7 +44,7 @@  ENTRY_ALIGN (memcmp, 6)
 	DELOUSE (2)
 
 	subs	limit, limit, 8
-	b.lo	.Lless8
+	b.lo	L(less8)
 
 	/* Limit >= 8, so check first 8 bytes using unaligned loads.  */
 	ldr	data1, [src1], 8
@@ -52,65 +52,65 @@  ENTRY_ALIGN (memcmp, 6)
 	and	tmp1, src1, 7
 	add	limit, limit, tmp1
 	cmp	data1, data2
-	bne	.Lreturn
+	bne	L(return)
 
 	/* Align src1 and adjust src2 with bytes not yet done.  */
 	sub	src1, src1, tmp1
 	sub	src2, src2, tmp1
 
 	subs	limit, limit, 8
-	b.ls	.Llast_bytes
+	b.ls	L(last_bytes)
 
 	/* Loop performing 8 bytes per iteration using aligned src1.
 	   Limit is pre-decremented by 8 and must be larger than zero.
 	   Exit if <= 8 bytes left to do or if the data is not equal.  */
 	.p2align 4
-.Lloop8:
+L(loop8):
 	ldr	data1, [src1], 8
 	ldr	data2, [src2], 8
 	subs	limit, limit, 8
 	ccmp	data1, data2, 0, hi  /* NZCV = 0b0000.  */
-	b.eq	.Lloop8
+	b.eq	L(loop8)
 
 	cmp	data1, data2
-	bne	.Lreturn
+	bne	L(return)
 
 	/* Compare last 1-8 bytes using unaligned access.  */
-.Llast_bytes:
+L(last_bytes):
 	ldr	data1, [src1, limit]
 	ldr	data2, [src2, limit]
 
 	/* Compare data bytes and set return value to 0, -1 or 1.  */
-.Lreturn:
+L(return):
 #ifndef __AARCH64EB__
 	rev	data1, data1
 	rev	data2, data2
 #endif
 	cmp     data1, data2
-.Lret_eq:
+L(ret_eq):
 	cset	result, ne
 	cneg	result, result, lo
 	ret
 
 	.p2align 4
 	/* Compare up to 8 bytes.  Limit is [-8..-1].  */
-.Lless8:
+L(less8):
 	adds	limit, limit, 4
-	b.lo	.Lless4
+	b.lo	L(less4)
 	ldr	data1w, [src1], 4
 	ldr	data2w, [src2], 4
 	cmp	data1w, data2w
-	b.ne	.Lreturn
+	b.ne	L(return)
 	sub	limit, limit, 4
-.Lless4:
+L(less4):
 	adds	limit, limit, 4
-	beq	.Lret_eq
-.Lbyte_loop:
+	beq	L(ret_eq)
+L(byte_loop):
 	ldrb	data1w, [src1], 1
 	ldrb	data2w, [src2], 1
 	subs	limit, limit, 1
 	ccmp	data1w, data2w, 0, ne	/* NZCV = 0b0000.  */
-	b.eq	.Lbyte_loop
+	b.eq	L(byte_loop)
 	sub	result, data1w, data2w
 	ret