[AArch64] Add optimized memchr

Message ID 002d01d0f795$0ce77eb0$26b67c10$@com
State Superseded

Commit Message

Wilco Dijkstra Sept. 25, 2015, 1:21 p.m. UTC
  An optimized memchr was missing for AArch64. This version is similar to strchr and is significantly
faster than the C version. Passes GLIBC tests.

OK for commit?

ChangeLog:
2015-09-25  Wilco Dijkstra  <wdijkstr@arm.com>
2015-09-25  Kevin Petit  <kevin.petit@arm.com>

	* sysdeps/aarch64/memchr.S (__memchr): New file.
---
 sysdeps/aarch64/memchr.S | 162 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 162 insertions(+)
 create mode 100644 sysdeps/aarch64/memchr.S
  

Comments

Ondrej Bilka Sept. 26, 2015, 8:45 a.m. UTC | #1
On Fri, Sep 25, 2015 at 02:21:13PM +0100, Wilco Dijkstra wrote:
> An optimized memchr was missing for AArch64. This version is similar to strchr and is significantly
> faster than the C version. Passes GLIBC tests.
> 
> OK for commit?
> 
> ChangeLog:
> 2015-09-25  Wilco Dijkstra  <wdijkstr@arm.com>
> 2015-09-25  Kevin Petit  <kevin.petit@arm.com>
> 
> 	* sysdeps/aarch64/memchr.S (__memchr): New file.

How did you test performance? I think that loading the first 32 bytes
unaligned should be better here as well. Could you use dryrun to verify?

Also, the same optimization could be used for memrchr.
  
Wilco Dijkstra Sept. 28, 2015, 9:23 a.m. UTC | #2
> Ondřej Bílka wrote:
> On Fri, Sep 25, 2015 at 02:21:13PM +0100, Wilco Dijkstra wrote:
> > An optimized memchr was missing for AArch64. This version is similar to strchr and is
> > significantly faster than the C version. Passes GLIBC tests.
> >
> > OK for commit?
> >
> > ChangeLog:
> > 2015-09-25  Wilco Dijkstra  <wdijkstr@arm.com>
> > 2015-09-25  Kevin Petit  <kevin.petit@arm.com>
> >
> > 	* sysdeps/aarch64/memchr.S (__memchr): New file.
> 
> How did you test performance? I think that loading the first 32 bytes
> unaligned should be better here as well. Could you use dryrun to verify?
> 
> Also, the same optimization could be used for memrchr.

I haven't tuned this at all; this is an existing implementation that
was added to Newlib last year but had not yet been ported to GLIBC.

For maximum performance, doing the first 16/32 bytes unaligned will
likely be fastest, just as it was for strlen.

Wilco
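
For illustration, a rough C sketch of the "unaligned first 32 bytes"
idea discussed above (a hypothetical scalar stand-in with byte loops in
place of the NEON compares; not either implementation):

#include <stddef.h>
#include <stdint.h>

/* Scan the first 32 bytes without any alignment fixup or masking, then
   continue from the next 32-byte boundary.  Rescanning the overlap is
   harmless because those bytes are already known not to match.  */
void *
memchr_unaligned_first (const void *s, int c, size_t n)
{
  const unsigned char *p = s;
  unsigned char ch = c;
  size_t head = n < 32 ? n : 32;

  for (size_t i = 0; i < head; i++)	/* unaligned first chunk */
    if (p[i] == ch)
      return (void *) (p + i);
  if (n <= 32)
    return NULL;

  /* Aligned 32-byte chunks from here on (a plain byte loop stands in
     for the vector loop).  */
  const unsigned char *q
    = (const unsigned char *) (((uintptr_t) p + 32) & ~(uintptr_t) 31);
  for (const unsigned char *end = p + n; q < end; q++)
    if (*q == ch)
      return (void *) q;
  return NULL;
}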
  

Patch

diff --git a/sysdeps/aarch64/memchr.S b/sysdeps/aarch64/memchr.S
new file mode 100644
index 0000000..2f643dd
--- /dev/null
+++ b/sysdeps/aarch64/memchr.S
@@ -0,0 +1,162 @@
+/* memchr - find a character in a memory zone
+
+   Copyright (C) 2015 Free Software Foundation, Inc.
+
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64
+ * NEON available.
+ */
+
+/* Arguments and results.  */
+#define srcin		x0
+#define chrin		w1
+#define cntin		x2
+
+#define result		x0
+
+#define src		x3
+#define tmp		x4
+#define wtmp2		w5
+#define synd		x6
+#define soff		x9
+#define cntrem		x10
+
+#define vrepchr		v0
+#define vdata1		v1
+#define vdata2		v2
+#define vhas_chr1	v3
+#define vhas_chr2	v4
+#define vrepmask	v5
+#define vend		v6
+
+/*
+ * Core algorithm:
+ *
+ * For each 32-byte chunk we calculate a 64-bit syndrome value, with two bits
+ * per byte. For each tuple, bit 0 is set if the relevant byte matched the
+ * requested character and bit 1 is not used (faster than using a 32-bit
+ * syndrome). Since the bits in the syndrome reflect exactly the order in which
+ * things occur in the original string, counting trailing zeros allows us to
+ * identify exactly which byte has matched.
+ */
+
+ENTRY (__memchr)
+	/* Do not dereference srcin if no bytes to compare.  */
+	cbz	cntin, L(zero_length)
+	/*
+	 * Magic constant 0x40100401 allows us to identify which lane matches
+	 * the requested byte.
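+	 * Replicated into each 32-bit lane, its bytes are 0x01, 0x04,
+	 * 0x10, 0x40, so a match in byte i of each word contributes
+	 * bit 2*i once the pairwise additions fold the vectors to 64 bits.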
+	 */
+	mov	wtmp2, #0x0401
+	movk	wtmp2, #0x4010, lsl #16
+	dup	vrepchr.16b, chrin
+	/* Work with aligned 32-byte chunks */
+	bic	src, srcin, #31
+	dup	vrepmask.4s, wtmp2
+	ands	soff, srcin, #31
+	and	cntrem, cntin, #31
+	b.eq	L(loop)
+
+	/*
+	 * Input string is not 32-byte aligned. We calculate the syndrome
+	 * value for the aligned 32 bytes block containing the first bytes
+	 * and mask the irrelevant part.
+	 */
+
+	ld1	{vdata1.16b, vdata2.16b}, [src], #32
+	sub	tmp, soff, #32
+	adds	cntin, cntin, tmp
+	cmeq	vhas_chr1.16b, vdata1.16b, vrepchr.16b
+	cmeq	vhas_chr2.16b, vdata2.16b, vrepchr.16b
+	and	vhas_chr1.16b, vhas_chr1.16b, vrepmask.16b
+	and	vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b
+	addp	vend.16b, vhas_chr1.16b, vhas_chr2.16b		/* 256->128 */
+	addp	vend.16b, vend.16b, vend.16b			/* 128->64 */
+	mov	synd, vend.2d[0]
+	/* Clear the soff*2 lower bits */
+	lsl	tmp, soff, #1
+	lsr	synd, synd, tmp
+	lsl	synd, synd, tmp
+	/* The first block can also be the last */
+	b.ls	L(masklast)
+	/* Have we found something already? */
+	cbnz	synd, L(tail)
+
+L(loop):
+	ld1	{vdata1.16b, vdata2.16b}, [src], #32
+	subs	cntin, cntin, #32
+	cmeq	vhas_chr1.16b, vdata1.16b, vrepchr.16b
+	cmeq	vhas_chr2.16b, vdata2.16b, vrepchr.16b
+	/* If we're out of data we finish regardless of the result */
+	b.ls	L(end)
+	/* Use a fast check for the termination condition */
+	orr	vend.16b, vhas_chr1.16b, vhas_chr2.16b
+	addp	vend.2d, vend.2d, vend.2d
+	mov	synd, vend.2d[0]
+	/* We're not out of data, loop if we haven't found the character */
+	cbz	synd, L(loop)
+
+L(end):
+	/* Termination condition found, let's calculate the syndrome value */
+	and	vhas_chr1.16b, vhas_chr1.16b, vrepmask.16b
+	and	vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b
+	addp	vend.16b, vhas_chr1.16b, vhas_chr2.16b		/* 256->128 */
+	addp	vend.16b, vend.16b, vend.16b			/* 128->64 */
+	mov	synd, vend.2d[0]
+	/* Only do the clear for the last possible block */
+	b.hi	L(tail)
+
+L(masklast):
+	/* Clear the (32 - ((cntrem + soff) % 32)) * 2 upper bits */
+	add	tmp, cntrem, soff
+	and	tmp, tmp, #31
+	sub	tmp, tmp, #32
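+	/* tmp is now negative; -(tmp << 1) gives the doubled bit count
+	   to clear, which the variable shifts below take modulo 64.  */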
+	neg	tmp, tmp, lsl #1
+	lsl	synd, synd, tmp
+	lsr	synd, synd, tmp
+
+L(tail):
+	/* Count the trailing zeros using bit reversing */
+	rbit	synd, synd
+	/* Compensate the last post-increment */
+	sub	src, src, #32
+	/* Check that we have found a character */
+	cmp	synd, #0
+	/* And count the leading zeros */
+	clz	synd, synd
+	/* Compute the potential result */
+	add	result, src, synd, lsr #1
+	/* Select result or NULL */
+	csel	result, xzr, result, eq
+	ret
+
+L(zero_length):
+	mov	result, #0
+	ret
+END (__memchr)
+weak_alias (__memchr, memchr)
+libc_hidden_builtin_def (memchr)
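
To illustrate the syndrome technique described in the patch's
core-algorithm comment, here is a hypothetical scalar C sketch.  It
uses 8-byte chunks with one bit per byte rather than 32-byte chunks
with two bits per byte, and __builtin_ctzll (the GCC/Clang builtin)
in place of the rbit/clz pair:

#include <stddef.h>
#include <stdint.h>

void *
memchr_syndrome (const void *s, int c, size_t n)
{
  const unsigned char *p = s;
  unsigned char ch = c;

  for (size_t i = 0; i < n; i += 8)
    {
      size_t chunk = n - i < 8 ? n - i : 8;
      uint64_t synd = 0;

      /* Set bit j when byte j matches; bit order mirrors byte order,
	 just as in the vector code.  */
      for (size_t j = 0; j < chunk; j++)
	if (p[i + j] == ch)
	  synd |= (uint64_t) 1 << j;

      /* The lowest set bit identifies the first match, so counting
	 trailing zeros yields its index.  */
      if (synd != 0)
	return (void *) (p + i + __builtin_ctzll (synd));
    }
  return NULL;
}

In the patch each byte owns a 2-bit field, so a 32-byte chunk folds
into one 64-bit syndrome and the trailing-zero count is halved (the
"lsr #1" in the final add) to obtain the byte offset.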