From patchwork Fri Feb 2 04:48:07 2018
X-Patchwork-Submitter: Siddhesh Poyarekar
X-Patchwork-Id: 25759
From: Siddhesh Poyarekar
To: libc-alpha@sourceware.org
Subject: [COMMITTED] aarch64: Use the L() macro for labels in memcmp
Date: Fri, 2 Feb 2018 10:18:07 +0530
Message-Id: <20180202044809.2797-1-siddhesh@sourceware.org>

The L() macro makes the assembly a bit more readable.  There's no
change to codegen on aarch64 from this change.  (A short sketch of how
the macro expands follows the patch below.)

	* sysdeps/aarch64/memcmp.S: Use L() macro for labels.
---
 ChangeLog                |  2 ++
 sysdeps/aarch64/memcmp.S | 32 ++++++++++++++++----------------
 2 files changed, 18 insertions(+), 16 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 4b3be4ba84..6c6dac46e4 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,7 @@
 2018-02-02  Siddhesh Poyarekar
 
+	* sysdeps/aarch64/memcmp.S: Use L() macro for labels.
+
 	* benchtests/bench-memcmp.c: Print json instead of plain text.
 
 	* benchtests/bench-memcmp.c (do_test): Call realloc_buf for
diff --git a/sysdeps/aarch64/memcmp.S b/sysdeps/aarch64/memcmp.S
index ccc795adeb..ecd12061b2 100644
--- a/sysdeps/aarch64/memcmp.S
+++ b/sysdeps/aarch64/memcmp.S
@@ -44,7 +44,7 @@ ENTRY_ALIGN (memcmp, 6)
 	DELOUSE (2)
 
 	subs	limit, limit, 8
-	b.lo	.Lless8
+	b.lo	L(less8)
 
 	/* Limit >= 8, so check first 8 bytes using unaligned loads.  */
 	ldr	data1, [src1], 8
@@ -52,65 +52,65 @@ ENTRY_ALIGN (memcmp, 6)
 	and	tmp1, src1, 7
 	add	limit, limit, tmp1
 	cmp	data1, data2
-	bne	.Lreturn
+	bne	L(return)
 
 	/* Align src1 and adjust src2 with bytes not yet done.  */
 	sub	src1, src1, tmp1
 	sub	src2, src2, tmp1
 
 	subs	limit, limit, 8
-	b.ls	.Llast_bytes
+	b.ls	L(last_bytes)
 
 	/* Loop performing 8 bytes per iteration using aligned src1.
 	   Limit is pre-decremented by 8 and must be larger than zero.
 	   Exit if <= 8 bytes left to do or if the data is not equal.  */
 	.p2align 4
-.Lloop8:
+L(loop8):
 	ldr	data1, [src1], 8
 	ldr	data2, [src2], 8
 	subs	limit, limit, 8
 	ccmp	data1, data2, 0, hi  /* NZCV = 0b0000.  */
-	b.eq	.Lloop8
+	b.eq	L(loop8)
 
 	cmp	data1, data2
-	bne	.Lreturn
+	bne	L(return)
 
 	/* Compare last 1-8 bytes using unaligned access.  */
-.Llast_bytes:
+L(last_bytes):
 	ldr	data1, [src1, limit]
 	ldr	data2, [src2, limit]
 
 	/* Compare data bytes and set return value to 0, -1 or 1.  */
-.Lreturn:
+L(return):
 #ifndef __AARCH64EB__
 	rev	data1, data1
 	rev	data2, data2
 #endif
 	cmp	data1, data2
-.Lret_eq:
+L(ret_eq):
 	cset	result, ne
 	cneg	result, result, lo
 	ret
 
 	.p2align 4
 	/* Compare up to 8 bytes.  Limit is [-8..-1].  */
-.Lless8:
+L(less8):
 	adds	limit, limit, 4
-	b.lo	.Lless4
+	b.lo	L(less4)
 	ldr	data1w, [src1], 4
 	ldr	data2w, [src2], 4
 	cmp	data1w, data2w
-	b.ne	.Lreturn
+	b.ne	L(return)
 	sub	limit, limit, 4
-.Lless4:
+L(less4):
 	adds	limit, limit, 4
-	beq	.Lret_eq
-.Lbyte_loop:
+	beq	L(ret_eq)
+L(byte_loop):
 	ldrb	data1w, [src1], 1
 	ldrb	data2w, [src2], 1
 	subs	limit, limit, 1
 	ccmp	data1w, data2w, 0, ne	/* NZCV = 0b0000.  */
-	b.eq	.Lbyte_loop
+	b.eq	L(byte_loop)
 	sub	result, data1w, data2w
 	ret
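
For readers unfamiliar with the convention: L() is glibc's local-label
helper for assembly sources, defined in the sysdep headers along the
lines of "#define L(name) .L##name" (quoted from memory here, not taken
from this patch).  Because the expansion still yields a .L-prefixed,
assembler-local symbol, the assembled object is identical to one
written with literal .Lfoo labels, which is why the patch causes no
codegen change.  A minimal sketch follows; the demo.S file name and the
demo symbol are invented for illustration and are not part of glibc:

/* demo.S - illustrates that L() is purely a readability macro.  */
#define L(name) .L##name

	.text
	.globl	demo
	.type	demo, %function
demo:
	cbz	x0, L(done)	/* The preprocessor turns L(done) into .Ldone.  */
	mov	x0, 1
L(done):			/* Same local label as writing .Ldone: by hand.  */
	ret
	.size	demo, .-demo

Preprocessing the sketch (for example with aarch64-linux-gnu-gcc -E
demo.S) shows each L() use expanding to a plain .L label, so assembling
the L() and .L spellings produces byte-identical objects.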