AArch64: Optimize memrchr

Message ID PAWPR08MB8982CC8DDEB31EF83DFDA98683FD9@PAWPR08MB8982.eurprd08.prod.outlook.com
State Committed
Commit 00776241776e67fc666b896c1e85770f4f3ec1e1
Series: AArch64: Optimize memrchr

Checks

Context                  Check    Description
dj/TryBot-apply_patch    success  Patch applied to master at the time it was sent
dj/TryBot-32bit          success  Build for i686

Commit Message

Wilco Dijkstra Jan. 12, 2023, 3:57 p.m. UTC
Optimize the main loop - large strings are 43% faster on modern CPUs.
Passes regress.

---
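For context, memrchr(s, c, n) returns a pointer to the last occurrence of
(unsigned char) c in the first n bytes of s, or NULL if there is none. A
minimal scalar model of those semantics (the name memrchr_ref is
illustrative; the patched assembly implements the same contract with
16-byte NEON compares and an unrolled main loop):

#include <stddef.h>

/* Scalar reference model of memrchr semantics; for illustration only.  */
void *
memrchr_ref (const void *s, int c, size_t n)
{
  const unsigned char *p = (const unsigned char *) s + n;

  while (n--)
    if (*--p == (unsigned char) c)
      return (void *) p;
  return NULL;
}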

Comments

Szabolcs Nagy Jan. 13, 2023, 12:28 p.m. UTC | #1
The 01/12/2023 15:57, Wilco Dijkstra wrote:
> Optimize the main loop - large strings are 43% faster on modern CPUs.
> Passes regress.

please commit it, thanks.

Reviewed-by: Szabolcs Nagy <szabolcs.nagy@arm.com>


Patch

diff --git a/sysdeps/aarch64/memrchr.S b/sysdeps/aarch64/memrchr.S
index 9d2d29a396d46d6c2e74e3ca637091e2f3d68d5e..621fc65109736646b74900db8d15c6f8a7c68895 100644
--- a/sysdeps/aarch64/memrchr.S
+++ b/sysdeps/aarch64/memrchr.S
@@ -26,7 +26,6 @@ 
  * MTE compatible.
  */
 
-/* Arguments and results.  */
 #define srcin		x0
 #define chrin		w1
 #define cntin		x2
@@ -77,31 +76,34 @@  ENTRY (__memrchr)
 	csel	result, result, xzr, hi
 	ret
 
+	nop
 L(start_loop):
-	sub	tmp, end, src
-	subs	cntrem, cntin, tmp
+	subs	cntrem, src, srcin
 	b.ls	L(nomatch)
 
 	/* Make sure that it won't overread by a 16-byte chunk */
-	add	tmp, cntrem, 15
-	tbnz	tmp, 4, L(loop32_2)
+	sub	cntrem, cntrem, 1
+	tbz	cntrem, 4, L(loop32_2)
+	add	src, src, 16
 
-	.p2align 4
+	.p2align 5
 L(loop32):
-	ldr	qdata, [src, -16]!
+	ldr	qdata, [src, -32]!
 	cmeq	vhas_chr.16b, vdata.16b, vrepchr.16b
 	umaxp	vend.16b, vhas_chr.16b, vhas_chr.16b		/* 128->64 */
 	fmov	synd, dend
 	cbnz	synd, L(end)
 
 L(loop32_2):
-	ldr	qdata, [src, -16]!
+	ldr	qdata, [src, -16]
 	subs	cntrem, cntrem, 32
 	cmeq	vhas_chr.16b, vdata.16b, vrepchr.16b
-	b.ls	L(end)
+	b.lo	L(end_2)
 	umaxp	vend.16b, vhas_chr.16b, vhas_chr.16b		/* 128->64 */
 	fmov	synd, dend
 	cbz	synd, L(loop32)
+L(end_2):
+	sub	src, src, 16
 L(end):
 	shrn	vend.8b, vhas_chr.8h, 4		/* 128->64 */
 	fmov	synd, dend