powerpc64: strrchr optimization for power8

Message ID 1487070321-27700-1-git-send-email-raji@linux.vnet.ibm.com
State Committed

Commit Message

Rajalakshmi S Feb. 14, 2017, 11:05 a.m. UTC
  Changes from previous version [1]

  - Comments correction and alignment changes.

--
P7 code is used for strings <= 32B; for strings > 32B, vectorized loops are
used.  This shows an average 25% improvement, depending on the position of the
search character.  Performance is the same for shorter strings.
Tested on ppc64 and ppc64le.
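
In outline, the approach is roughly the following (an illustrative C sketch
of the scan strategy only, not the committed code; the name strrchr_sketch
is made up):

  #include <stddef.h>

  /* Sketch: the real code compares 8 bytes at a time with cmpb for the
     first 32B (the POWER7 approach) and then 32 bytes per iteration with
     VMX compares, but the tracking logic is the same -- remember the
     last match until the terminating null is found.  */
  static char *
  strrchr_sketch (const char *s, int c)
  {
    const char *last = NULL;
    for (;; s++)
      {
        if (*s == (char) c)
          last = s;
        if (*s == '\0')
          return (char *) last;
      }
  }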

2017-02-14  Rajalakshmi Srinivasaraghavan  <raji@linux.vnet.ibm.com>

	* sysdeps/powerpc/powerpc64/multiarch/Makefile
	(sysdep_routines): Add strrchr-power8.
	* sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
	(strrchr): Add __strrchr_power8 to list of strrchr functions.
	* sysdeps/powerpc/powerpc64/multiarch/strrchr-power8.S: New file.
	* sysdeps/powerpc/powerpc64/multiarch/strrchr.c
	(strrchr): Add __strrchr_power8 to ifunc list.
	* sysdeps/powerpc/powerpc64/power8/strrchr.S: New file.
---
 sysdeps/powerpc/powerpc64/multiarch/Makefile       |   3 +-
 .../powerpc/powerpc64/multiarch/ifunc-impl-list.c  |   3 +
 .../powerpc/powerpc64/multiarch/strrchr-power8.S   |  39 ++
 sysdeps/powerpc/powerpc64/multiarch/strrchr.c      |   3 +
 sysdeps/powerpc/powerpc64/power8/strrchr.S         | 464 +++++++++++++++++++++
 5 files changed, 511 insertions(+), 1 deletion(-)
 create mode 100644 sysdeps/powerpc/powerpc64/multiarch/strrchr-power8.S
 create mode 100644 sysdeps/powerpc/powerpc64/power8/strrchr.S
  

Comments

Gabriel F T Gomes Feb. 20, 2017, 1:09 p.m. UTC | #1
LGTM.

On Tue, 14 Feb 2017 16:35:21 +0530
Rajalakshmi Srinivasaraghavan <raji@linux.vnet.ibm.com> wrote:

> Changes from previous version [1]
> 
>   - Comments correction and alignment changes.
> 
> --
> P7 code is used for strings <= 32B; for strings > 32B, vectorized loops are
> used.  This shows an average 25% improvement, depending on the position of
> the search character.  Performance is the same for shorter strings.
> Tested on ppc64 and ppc64le.
> [...]
  
Carlos O'Donell Feb. 20, 2017, 1:42 p.m. UTC | #2
On 02/14/2017 06:05 AM, Rajalakshmi Srinivasaraghavan wrote:
> P7 code is used for strings <= 32B; for strings > 32B, vectorized loops are
> used.  This shows an average 25% improvement, depending on the position of
> the search character.  Performance is the same for shorter strings.
> Tested on ppc64 and ppc64le.

What did you use to test the 25% improvement?
  
Rajalakshmi S Feb. 20, 2017, 4:01 p.m. UTC | #3
On 02/20/2017 07:12 PM, Carlos O'Donell wrote:
> On 02/14/2017 06:05 AM, Rajalakshmi Srinivasaraghavan wrote:
>> P7 code is used for strings <= 32B; for strings > 32B, vectorized loops are
>> used.  This shows an average 25% improvement, depending on the position of
>> the search character.  Performance is the same for shorter strings.
>> Tested on ppc64 and ppc64le.
> What did you use to test the 25% improvement?

This improvement is seen when compared to POWER7.  The benchtest was
modified to use lengths from 0 to 400 to find the average across
different lengths.
  
Carlos O'Donell Feb. 20, 2017, 4:06 p.m. UTC | #4
On 02/20/2017 11:01 AM, Rajalakshmi Srinivasaraghavan wrote:
> 
> 
> On 02/20/2017 07:12 PM, Carlos O'Donell wrote:
>> On 02/14/2017 06:05 AM, Rajalakshmi Srinivasaraghavan wrote:
>>> P7 code is used for strings <= 32B; for strings > 32B, vectorized loops are
>>> used.  This shows an average 25% improvement, depending on the position of
>>> the search character.  Performance is the same for shorter strings.
>>> Tested on ppc64 and ppc64le.
>> What did you use to test the 25% improvement?
> 
> This improvement is seen when compared to POWER7.  The benchtest was
> modified to use lengths from 0 to 400 to find the average across
> different lengths.
 
Could you post your modifications for review and explain your
process in a little more detail?  I'm curious about the changes
you made.
  
Rajalakshmi S Feb. 20, 2017, 4:49 p.m. UTC | #5
On 02/20/2017 09:36 PM, Carlos O'Donell wrote:
> On 02/20/2017 11:01 AM, Rajalakshmi Srinivasaraghavan wrote:
>>
>> On 02/20/2017 07:12 PM, Carlos O'Donell wrote:
>>> On 02/14/2017 06:05 AM, Rajalakshmi Srinivasaraghavan wrote:
>>>> P7 code is used for strings <= 32B; for strings > 32B, vectorized loops are
>>>> used.  This shows an average 25% improvement, depending on the position of
>>>> the search character.  Performance is the same for shorter strings.
>>>> Tested on ppc64 and ppc64le.
>>> What did you use to test the 25% improvement?
>> This improvement is seen when compared to POWER7.  The benchtest was
>> modified to use lengths from 0 to 400 to find the average across
>> different lengths.
> Could you post your modifications for review and explain your
> process in a little more detail?  I'm curious about the changes
> you made.

I modified benchtests/bench-strrchr.c to measure only the following loop
and commented out the existing 'for' loops.

   for (i = 0; i < 400; ++i)
     {
       do_test (0, i, i + 1, 0, SMALL_CHAR);
       do_test (i, i, i + 1, 0, BIG_CHAR);
     }
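
For reference, and assuming bench-strrchr.c mirrors the do_test helper
of string/test-strrchr.c (worth checking against the actual file), the
arguments would be:

   /* Assumed parameter order, mirroring string/test-strrchr.c.  */
   static void do_test (size_t align,   /* alignment of the string */
                        size_t pos,     /* position of the sought char */
                        size_t len,     /* length of the string */
                        int seek_char,  /* character searched for */
                        int max_char);  /* largest filler char value */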

The generated benchtest results are then copied to a spreadsheet to
calculate the improvement.

Attached are the benchtest results for ppc64le.
Note: The numbers vary slightly from run to run.
  
Carlos Eduardo Seo Feb. 21, 2017, 5:01 p.m. UTC | #6
On 2/14/17, 9:05 AM, "Rajalakshmi Srinivasaraghavan" <libc-alpha-owner@sourceware.org on behalf of raji@linux.vnet.ibm.com> wrote:

    Changes from previous version [1]
    
      - Comments correction and alignment changes.
    
    --
    P7 code is used for strings <= 32B; for strings > 32B, vectorized loops are
    used.  This shows an average 25% improvement, depending on the position of
    the search character.  Performance is the same for shorter strings.
    Tested on ppc64 and ppc64le.
    
LGTM.

--
Carlos Eduardo Seo
Software Engineer - Linux on Power Toolchain
cseo@linux.vnet.ibm.com
  
Rajalakshmi S Feb. 28, 2017, 7:32 a.m. UTC | #7
On 02/20/2017 09:36 PM, Carlos O'Donell wrote:
> On 02/20/2017 11:01 AM, Rajalakshmi Srinivasaraghavan wrote:
>>
>>
>> On 02/20/2017 07:12 PM, Carlos O'Donell wrote:
>>> On 02/14/2017 06:05 AM, Rajalakshmi Srinivasaraghavan wrote:
>>>> P7 code is used for strings <= 32B; for strings > 32B, vectorized loops are
>>>> used.  This shows an average 25% improvement, depending on the position of
>>>> the search character.  Performance is the same for shorter strings.
>>>> Tested on ppc64 and ppc64le.
>>> What did you use to test the 25% improvement?
>>
>> This improvement is seen when compared to POWER7.  The benchtest was
>> modified to use lengths from 0 to 400 to find the average across
>> different lengths.
>
> Could you post your modifications for review and explain your
> process in a little more detail?  I'm curious about the changes
> you made.

Carlos,
Posted benchtest modification here:
https://sourceware.org/ml/libc-alpha/2017-02/msg00380.html
  
Rajalakshmi S March 9, 2017, 6:14 a.m. UTC | #8
On 02/28/2017 01:02 PM, Rajalakshmi Srinivasaraghavan wrote:
>
>
> On 02/20/2017 09:36 PM, Carlos O'Donell wrote:
>> On 02/20/2017 11:01 AM, Rajalakshmi Srinivasaraghavan wrote:
>>>
>>>
>>> On 02/20/2017 07:12 PM, Carlos O'Donell wrote:
>>>> On 02/14/2017 06:05 AM, Rajalakshmi Srinivasaraghavan wrote:
>>>>> P7 code is used for strings <= 32B; for strings > 32B, vectorized
>>>>> loops are used.  This shows an average 25% improvement, depending
>>>>> on the position of the search character.  Performance is the same
>>>>> for shorter strings.
>>>>> Tested on ppc64 and ppc64le.
>>>> What did you use to test the 25% improvement?
>>>
>>> This improvement is seen when compared to POWER7.  The benchtest was
>>> modified to use lengths from 0 to 400 to find the average across
>>> different lengths.
>>
>> Could you post your modifications for review and explain your
>> process in a little more detail?  I'm curious about the changes
>> you made.
>
> Carlos,
> Posted benchtest modification here:
> https://sourceware.org/ml/libc-alpha/2017-02/msg00380.html

Carlos,

Do you have further comments?
  
Carlos O'Donell March 17, 2017, 3:38 p.m. UTC | #9
On 03/09/2017 01:14 AM, Rajalakshmi Srinivasaraghavan wrote:
> 
> 
> On 02/28/2017 01:02 PM, Rajalakshmi Srinivasaraghavan wrote:
>>
>>
>> On 02/20/2017 09:36 PM, Carlos O'Donell wrote:
>>> On 02/20/2017 11:01 AM, Rajalakshmi Srinivasaraghavan wrote:
>>>>
>>>>
>>>> On 02/20/2017 07:12 PM, Carlos O'Donell wrote:
>>>>> On 02/14/2017 06:05 AM, Rajalakshmi Srinivasaraghavan wrote:
>>>>>> P7 code is used for strings <= 32B; for strings > 32B, vectorized
>>>>>> loops are used.  This shows an average 25% improvement, depending
>>>>>> on the position of the search character.  Performance is the same
>>>>>> for shorter strings.
>>>>>> Tested on ppc64 and ppc64le.
>>>>> What did you use to test the 25% improvement?
>>>>
>>>> This improvement is seen when compared to POWER7.  The benchtest was
>>>> modified to use lengths from 0 to 400 to find the average across
>>>> different lengths.
>>>
>>> Could you post your modifications for review and explain your
>>> process in a little more detail?  I'm curious about the changes
>>> you made.
>>
>> Carlos,
>> Posted benchtest modification here:
>> https://sourceware.org/ml/libc-alpha/2017-02/msg00380.html
> 
> Carlos,
> 
> Do you have further comments?
 
This is exactly what I was interested in seeing, and I see Siddhesh
has approved your commit to benchtests to increase the string lengths
used in the analysis.

When I review these changes I look at:

(a) What microbenchmark did you use?

- Can we include it in glibc?

  * We did, your improvements should be going into master so others
    can reproduce them.

(b) What assumptions did you make and were they valid?

Increasing the microbenchmarks to measure up to 512 bytes is probably
a good thing: it gives broad coverage of performance from small to
large strings across multiples of most cache lines (and places
where prefetching might start helping).

Does IBM internally have any good data about the low, median,
average, and high lengths of strings that are being used with the
strrchr API?  Such statistical data would allow us to tune
the microbenchmark.

Knowing the mean value of string lengths would let us decide where
to place most of the optimization effort.  I don't know that we have
any good references to academic literature here.

Your lack of such references in your patch means you don't know either,
but given that you indicate small-string performance is no worse,
this patch looks fine.

In summary:

- You assume applications will be using strings > 32 bytes, and that's
  not an entirely unreasonable assumption to make.

- You show that performance for strings <= 32B remains the same and
  that longer strings improve.

- You contribute the microbenchmark changes that allowed you to measure
  these numbers.

That's exactly what I want to see from a good contribution.

Now I plotted the power8 performance and there is a big bump in the middle,
any idea why?

https://docs.google.com/a/redhat.com/spreadsheets/d/16kW90bXH7nC8Ak6Xyoe4cxVIvFPwjVDcO-7qsZs0iVc/pubhtml
  
Rajalakshmi S March 20, 2017, 8:39 a.m. UTC | #10
On 03/17/2017 09:08 PM, Carlos O'Donell wrote:
> Now I plotted the power8 performance and there is a big bump in the middle,
> any idea why?
>
> https://docs.google.com/a/redhat.com/spreadsheets/d/16kW90bXH7nC8Ak6Xyoe4cxVIvFPwjVDcO-7qsZs0iVc/pubhtml

I am not able to access this.
  
Carlos O'Donell March 20, 2017, 4:34 p.m. UTC | #11
On 03/20/2017 04:39 AM, Rajalakshmi Srinivasaraghavan wrote:
> 
> 
> On 03/17/2017 09:08 PM, Carlos O'Donell wrote:
>> Now I plotted the power8 performance and there is a big bump in the middle,
>> any idea why?
>>
>> https://docs.google.com/a/redhat.com/spreadsheets/d/16kW90bXH7nC8Ak6Xyoe4cxVIvFPwjVDcO-7qsZs0iVc/pubhtml
> 
> I am not able to access this.
> 

This should work.

https://docs.google.com/spreadsheets/d/1e2QHzIvMEp_71z0NqFbhRxY27BzscmV23UeE0NawQKE/edit?usp=sharing
  
Rajalakshmi S March 21, 2017, 5:14 a.m. UTC | #12
On 03/17/2017 09:08 PM, Carlos O'Donell wrote:
> Knowing the mean value of string lengths would let us decide where
> to place most of the optimization effort.  I don't know that we have
> any good references to academic literature here.
>
No, I don't have details about strrchr call statistics.  This
optimization is to make use of POWER8 capabilities in general.

> Now I plotted the power8 performance and there is a big bump in the middle,
> any idea why?

I could see some sudden increase for sizes 104-118 and around 222
in the shared sheet.  However, there is no special logic in the code
related to these sizes.  I tried to check whether this happens on my
test P8 ppc64le system and could not recreate it.
  
Tulio Magno Quites Machado Filho April 3, 2017, 3:29 p.m. UTC | #13
Carlos O'Donell <carlos@redhat.com> writes:

> Now I plotted the power8 performance and there is a big bump in the middle,
> any idea why?

I remember that we discussed this on #glibc; you noticed the same behavior
with simple_strrchr, and we agreed this could be raw hardware behavior.

Do you think this question is still blocking this patch?
  
Carlos O'Donell April 17, 2017, 3:20 p.m. UTC | #14
On 04/03/2017 11:29 AM, Tulio Magno Quites Machado Filho wrote:
> Carlos O'Donell <carlos@redhat.com> writes:
> 
>> Now I plotted the power8 performance and there is a big bump in the middle,
>> any idea why?
> 
> I remember that we discussed this on #glibc; you noticed the same behavior
> with simple_strrchr, and we agreed this could be raw hardware behavior.
> 
> Do you think this question is still blocking this patch?
 
My question does not block the patch.

At this point the patch looks good to me.

You just have an unexplained performance issue, but it appears to impact
_all_ the implementations, both the simple one and the new POWER8 one, so
it must be some property of the hardware showing up in the algorithm.
It's odd to see that consistent bump.
  

Patch

diff --git a/sysdeps/powerpc/powerpc64/multiarch/Makefile b/sysdeps/powerpc/powerpc64/multiarch/Makefile
index f5889a3..0fc0ebc 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/Makefile
+++ b/sysdeps/powerpc/powerpc64/multiarch/Makefile
@@ -14,7 +14,8 @@  sysdep_routines += memcpy-power7 memcpy-a2 memcpy-power6 memcpy-cell \
 		   strchrnul-power8 strchrnul-power7 strchrnul-ppc64 \
 		   strcpy-power8 strcpy-power7 strcpy-ppc64 stpcpy-power8 \
 		   stpcpy-power7 stpcpy-ppc64 \
-		   strrchr-power7 strrchr-ppc64 strncat-power7 strncat-ppc64 \
+		   strrchr-power8 strrchr-power7 strrchr-ppc64 \
+		   strncat-power7 strncat-ppc64 \
 		   strncpy-power7 strncpy-ppc64 \
 		   stpncpy-power8 stpncpy-power7 stpncpy-ppc64 \
 		   strcmp-power9 strcmp-power8 strcmp-power7 strcmp-ppc64 \
diff --git a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
index 209aec5..d77c47f 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
@@ -281,6 +281,9 @@  __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   /* Support sysdeps/powerpc/powerpc64/multiarch/strrchr.c.  */
   IFUNC_IMPL (i, name, strrchr,
 	      IFUNC_IMPL_ADD (array, i, strrchr,
+			      hwcap2 & PPC_FEATURE2_ARCH_2_07,
+			      __strrchr_power8)
+	      IFUNC_IMPL_ADD (array, i, strrchr,
 			      hwcap & PPC_FEATURE_HAS_VSX,
 			      __strrchr_power7)
 	      IFUNC_IMPL_ADD (array, i, strrchr, 1,
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strrchr-power8.S b/sysdeps/powerpc/powerpc64/multiarch/strrchr-power8.S
new file mode 100644
index 0000000..23365a1
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strrchr-power8.S
@@ -0,0 +1,39 @@ 
+/* Optimized strrchr implementation for POWER8.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef ENTRY
+#define ENTRY(name)						\
+  .section ".text";						\
+  ENTRY_2(__strrchr_power8)					\
+  .align ALIGNARG(2);						\
+  BODY_LABEL(__strrchr_power8):					\
+  cfi_startproc;						\
+  LOCALENTRY(__strrchr_power8)
+
+#undef END
+#define END(name)						\
+  cfi_endproc;							\
+  TRACEBACK(__strrchr_power8)					\
+  END_2(__strrchr_power8)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc64/power8/strrchr.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strrchr.c b/sysdeps/powerpc/powerpc64/multiarch/strrchr.c
index dc1d3d0..0f94c9d 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strrchr.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strrchr.c
@@ -25,11 +25,14 @@ 
 
 extern __typeof (strrchr) __strrchr_ppc attribute_hidden;
 extern __typeof (strrchr) __strrchr_power7 attribute_hidden;
+extern __typeof (strrchr) __strrchr_power8 attribute_hidden;
 #undef strrchr
 
 /* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
    ifunc symbol properly.  */
 libc_ifunc_redirected (__redirect_strrchr, strrchr,
+		       (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+		       ? __strrchr_power8 :
 		       (hwcap & PPC_FEATURE_HAS_VSX)
 		       ? __strrchr_power7
 		       : __strrchr_ppc);
diff --git a/sysdeps/powerpc/powerpc64/power8/strrchr.S b/sysdeps/powerpc/powerpc64/power8/strrchr.S
new file mode 100644
index 0000000..8eb7485
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power8/strrchr.S
@@ -0,0 +1,464 @@ 
+/* Optimized strrchr implementation for PowerPC64/POWER8 using cmpb insn.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* char *[r3] strrchr (char *s [r3], int c [r4])  */
+/* TODO: change these to the actual instructions when the minimum required
+   binutils allows it.  */
+#define MTVRD(v,r) .long (0x7c000167 | ((v)<<(32-11)) | ((r)<<(32-16)))
+#define MFVRD(r,v) .long (0x7c000067 | ((v)<<(32-11)) | ((r)<<(32-16)))
+#define VBPERMQ(t,a,b)  .long (0x1000054c \
+				| ((t)<<(32-11)) \
+				| ((a)<<(32-16)) \
+				| ((b)<<(32-21)) )
+#define VCLZD(r,v) .long (0x100007c2 | ((r)<<(32-11)) | ((v)<<(32-21)))
+#define VPOPCNTD(r,v) .long (0x100007c3 | ((r)<<(32-11)) | ((v)<<(32-21)))
+#define VADDUQM(t,a,b)  .long (0x10000100 \
+				| ((t)<<(32-11)) \
+				| ((a)<<(32-16)) \
+				| ((b)<<(32-21)) )
+#ifdef __LITTLE_ENDIAN__
+/* Find the match position from v6 and place result in r6.  */
+# define CALCULATE_MATCH() \
+	VBPERMQ(v6, v6, v10); \
+	vsldoi	v6, v6, v6, 6; \
+	MFVRD(r7, v6); \
+	cntlzd	r6, r7; \
+	subfic	r6, r6, 15;
+/*
+ * Find the first null position to mask bytes after null.
+ * (reg): vcmpequb result: v2 for 1st qw, v3 for 2nd qw.
+ * Result placed at v2.
+ */
+# define FIND_NULL_POS(reg) \
+	vspltisb	v11, -1; \
+	VADDUQM(v11, reg, v11); \
+	vandc	v11, v11, reg; \
+	VPOPCNTD(v2, v11); \
+	vspltb	v11, v2, 15; \
+	vcmpequb.	v11, v11, v9; \
+	blt	cr6, 1f; \
+	vsldoi	v9, v0, v9, 1; \
+	vslo	v2, v2, v9; \
+1: \
+	vsumsws	v2, v2, v0;
+#else
+# define CALCULATE_MATCH() \
+	VBPERMQ(v6, v6, v10); \
+	MFVRD(r7, v6); \
+	addi	r6, r7, -1; \
+	andc	r6, r6, r7; \
+	popcntd	r6, r6; \
+	subfic	r6, r6, 15;
+# define FIND_NULL_POS(reg) \
+	VCLZD(v2, reg); \
+	vspltb	v11, v2, 7; \
+	vcmpequb.	v11, v11, v9; \
+	blt	cr6, 1f; \
+	vsldoi	v9, v0, v9, 1; \
+	vsro	v2, v2, v9; \
+1: \
+	vsumsws	v2, v2, v0;
+#endif	/* !__LITTLE_ENDIAN__  */
+	.machine  power7
+ENTRY (strrchr)
+	CALL_MCOUNT 2
+	dcbt	0,r3
+	clrrdi	r8,r3,3	      /* Align the address to doubleword boundary.  */
+	cmpdi	cr7,r4,0
+	ld	r12,0(r8)     /* Load doubleword from memory.  */
+	li	r9,0	      /* Used to store last occurrence.  */
+	li	r0,0	      /* Doubleword with null chars to use
+				 with cmpb.  */
+
+	rlwinm	r6,r3,3,26,28 /* Calculate padding.  */
+
+	beq	cr7,L(null_match)
+
+	/* Replicate byte to doubleword.  */
+	insrdi	r4,r4,8,48
+	insrdi	r4,r4,16,32
+	insrdi	r4,r4,32,0
+
+	/* r4 is changed now.  If the low byte of c is null even though
+	   c itself was not, r4 is now zero; check for null again.  */
+	cmpdi	cr7,r4,0
+	beq	cr7,L(null_match)
+	/* Now r4 has a doubleword of c bytes and r0 has
+	   a doubleword of null bytes.  */
+
+	cmpb	r10,r12,r4     /* Compare each byte against c byte.  */
+	cmpb	r11,r12,r0     /* Compare each byte against null byte.  */
+
+	/* Move the doublewords left and right to discard the bits that are
+	   not part of the string and bring them back as zeros.  */
+#ifdef __LITTLE_ENDIAN__
+	srd	r10,r10,r6
+	srd	r11,r11,r6
+	sld	r10,r10,r6
+	sld	r11,r11,r6
+#else
+	sld	r10,r10,r6
+	sld	r11,r11,r6
+	srd	r10,r10,r6
+	srd	r11,r11,r6
+#endif
+	or	r5,r10,r11    /* OR the results to speed things up.  */
+	cmpdi	cr7,r5,0      /* If r5 == 0, no c or null bytes
+				 have been found.  */
+	bne	cr7,L(done)
+
+L(align):
+	andi.	r12, r8, 15
+
+	/* Are we now aligned to a quadword boundary?  If so, skip to
+	   the main loop.  Otherwise, go through the alignment code.  */
+
+	bne	cr0, L(loop)
+
+	/* Handle WORD2 of pair.  */
+	ldu	r12,8(r8)
+	cmpb	r10,r12,r4
+	cmpb	r11,r12,r0
+	or	r5,r10,r11
+	cmpdi	cr7,r5,0
+	bne	cr7,L(done)
+	b	L(loop)	      /* We branch here (rather than falling through)
+				 to skip the nops due to heavy alignment
+				 of the loop below.  */
+	.p2align  5
+L(loop):
+	/* Load two doublewords, compare and merge in a
+	   single register for speed.  This is an attempt
+	   to speed up the null-checking process for bigger strings.  */
+	ld	r12,8(r8)
+	ldu	r7,16(r8)
+	cmpb	r10,r12,r4
+	cmpb	r11,r12,r0
+	cmpb	r6,r7,r4
+	cmpb	r7,r7,r0
+	or	r12,r10,r11
+	or	r5,r6,r7
+	or	r5,r12,r5
+	cmpdi	cr7,r5,0
+	beq	cr7,L(vector)
+
+	/* OK, one (or both) of the doublewords contains a c/null byte.  Check
+	   the first doubleword and decrement the address in case the first
+	   doubleword really contains a c/null byte.  */
+	cmpdi	cr6,r12,0
+	addi	r8,r8,-8
+	bne	cr6,L(done)
+
+	/* The c/null byte must be in the second doubleword.  Adjust the
+	   address again and move the result of cmpb to r10 so we can calculate
+	   the pointer.  */
+
+	mr	r10,r6
+	mr	r11,r7
+	addi	r8,r8,8
+
+	/* r10/r11 have the output of the cmpb instructions, that is,
+	   0xff in the same position as the c/null byte in the original
+	   doubleword from the string.  Use that to calculate the pointer.  */
+
+L(done):
+	/* If there is more than one 0xff in r11, find the first position of
+	   0xff in r11 and fill r10 with 0 from that position.  */
+	cmpdi	cr7,r11,0
+	beq	cr7,L(no_null)
+#ifdef __LITTLE_ENDIAN__
+	addi	r3,r11,-1
+	andc	r3,r3,r11
+	popcntd r0,r3
+#else
+	cntlzd	r0,r11
+#endif
+	subfic	r0,r0,63
+	li	r6,-1
+#ifdef __LITTLE_ENDIAN__
+	srd	r0,r6,r0
+#else
+	sld	r0,r6,r0
+#endif
+	and	r10,r0,r10
+L(no_null):
+#ifdef __LITTLE_ENDIAN__
+	cntlzd	r0,r10		/* Count leading zeros before c matches.  */
+	addi	r3,r10,-1
+	andc	r3,r3,r10
+	addi	r10,r11,-1
+	andc	r10,r10,r11
+	cmpld	cr7,r3,r10
+	bgt	cr7,L(no_match)
+#else
+	addi	r3,r10,-1	/* Count trailing zeros before c matches.  */
+	andc	r3,r3,r10
+	popcntd	r0,r3
+	cmpld	cr7,r11,r10
+	bgt	cr7,L(no_match)
+#endif
+	srdi	r0,r0,3		/* Convert trailing zeros to bytes.  */
+	subfic	r0,r0,7
+	add	r9,r8,r0      /* Return address of the matching c byte
+				 or null in case c was not found.  */
+	li	r0,0
+	cmpdi	cr7,r11,0     /* If r11 == 0, no nulls have been found.  */
+	beq	cr7,L(align)
+
+	.align	4
+L(no_match):
+	mr	r3,r9
+	blr
+
+/* Check the first 32B in GPRs and move to vectorized loop.  */
+	.p2align  5
+L(vector):
+	addi	r3, r8, 8
+	/* Make sure 32B aligned.  */
+	andi.	r10, r3, 31
+	bne	cr0, L(loop)
+	vspltisb	v0, 0
+	/* Precompute vbpermq constant.  */
+	vspltisb	v10, 3
+	lvsl	v11, r0, r0
+	vslb	v10, v11, v10
+	MTVRD(v1, r4)
+	li	r5, 16
+	vspltb	v1, v1, 7
+	/* Compare 32 bytes in each loop.  */
+L(continue):
+	lvx	v4, 0, r3
+	lvx	v5, r3, r5
+	vcmpequb	v2, v0, v4
+	vcmpequb	v3, v0, v5
+	vcmpequb	v6, v1, v4
+	vcmpequb	v7, v1, v5
+	vor	v8, v2, v3
+	vor	v9, v6, v7
+	vor	v11, v8, v9
+	vcmpequb.	v11, v0, v11
+	addi	r3, r3, 32
+	blt	cr6, L(continue)
+	vcmpequb.	v8, v0, v8
+	blt	cr6, L(match)
+
+	/* One (or both) of the quadwords contains c/null.  */
+	vspltisb	v8, 2
+	vspltisb	v9, 5
+	/* Precompute values used for comparison.  */
+	vsl	v9, v8, v9	/* v9 = 0x4040404040404040.  */
+	vaddubm	v8, v9, v9
+	vsldoi	v8, v0, v8, 1	/* v8 = 0x80.  */
+
+	/* Check if null is in second qw.  */
+	vcmpequb.	v11, v0, v2
+	blt	cr6, L(secondqw)
+
+	/* Null found in first qw.  */
+	addi	r8, r3, -32
+	/* Calculate the null position.  */
+	FIND_NULL_POS(v2)
+	/* Check if null is in the first byte.  */
+	vcmpequb.	v11, v0, v2
+	blt	cr6, L(no_match)
+	vsububm	v2, v8, v2
+	/* Mask unwanted bytes after null.  */
+#ifdef __LITTLE_ENDIAN__
+	vslo	v6, v6, v2
+	vsro	v6, v6, v2
+#else
+	vsro	v6, v6, v2
+	vslo	v6, v6, v2
+#endif
+	vcmpequb.	v11, v0, v6
+	blt	cr6, L(no_match)
+	/* Found a match before null.  */
+	CALCULATE_MATCH()
+	add	r3, r8, r6
+	blr
+
+L(secondqw):
+	addi	r8, r3, -16
+	FIND_NULL_POS(v3)
+	vcmpequb.	v11, v0, v2
+	blt	cr6, L(no_match1)
+	vsububm	v2, v8, v2
+	/* Mask unwanted bytes after null.  */
+#ifdef __LITTLE_ENDIAN__
+	vslo	v7, v7, v2
+	vsro	v7, v7, v2
+#else
+	vsro	v7, v7, v2
+	vslo	v7, v7, v2
+#endif
+	vcmpequb.	v11, v0, v7
+	blt	cr6, L(no_match1)
+	addi	r8, r8, 16
+	vor	v6, v0, v7
+L(no_match1):
+	addi	r8, r8, -16
+	vcmpequb.	v11, v0, v6
+	blt	cr6, L(no_match)
+	/* Found a match before null.  */
+	CALCULATE_MATCH()
+	add	r3, r8, r6
+	blr
+
+L(match):
+	/* One (or both) of the quadwords contains a match.  */
+	mr	r8, r3
+	vcmpequb.	v8, v0, v7
+	blt	cr6, L(firstqw)
+	/* Match found in second qw.  */
+	addi	r8, r8, 16
+	vor	v6, v0, v7
+L(firstqw):
+	addi	r8, r8, -32
+	CALCULATE_MATCH()
+	add	r9, r8, r6      /* Record the address of the last match.  */
+	b	L(continue)
+/* We are here because strrchr was called with a null byte.  */
+	.align	4
+L(null_match):
+	/* r0 has a doubleword of null bytes.  */
+
+	cmpb	r5,r12,r0     /* Compare each byte against null bytes.  */
+
+	/* Move the doublewords left and right to discard the bits that are
+	   not part of the string and bring them back as zeros.  */
+#ifdef __LITTLE_ENDIAN__
+	srd	r5,r5,r6
+	sld	r5,r5,r6
+#else
+	sld	r5,r5,r6
+	srd	r5,r5,r6
+#endif
+	cmpdi	cr7,r5,0      /* If r5 == 0, no c or null bytes
+				 have been found.  */
+	bne	cr7,L(done_null)
+
+	andi.	r12, r8, 15
+
+	/* Are we now aligned to a quadword boundary?  If so, skip to
+	   the main loop.  Otherwise, go through the alignment code.  */
+
+	bne	cr0, L(loop_null)
+
+	/* Handle WORD2 of pair.  */
+	ldu	r12,8(r8)
+	cmpb	r5,r12,r0
+	cmpdi	cr7,r5,0
+	bne	cr7,L(done_null)
+	b	L(loop_null)  /* We branch here (rather than falling through)
+				 to skip the nops due to heavy alignment
+				 of the loop below.  */
+
+	/* Main loop to look for the end of the string.  Since it's a
+	   small loop (< 8 instructions), align it to 32 bytes.  */
+	.p2align  5
+L(loop_null):
+	/* Load two doublewords, compare and merge in a
+	   single register for speed.  This is an attempt
+	   to speed up the null-checking process for bigger strings.  */
+	ld	r12,8(r8)
+	ldu	r11,16(r8)
+	cmpb	r5,r12,r0
+	cmpb	r10,r11,r0
+	or	r6,r5,r10
+	cmpdi	cr7,r6,0
+	beq	cr7,L(vector1)
+
+	/* OK, one (or both) of the doublewords contains a null byte.  Check
+	   the first doubleword and decrement the address in case the first
+	   doubleword really contains a null byte.  */
+
+	cmpdi	cr6,r5,0
+	addi	r8,r8,-8
+	bne	cr6,L(done_null)
+
+	/* The null byte must be in the second doubleword.  Adjust the address
+	   again and move the result of cmpb to r10 so we can calculate the
+	   pointer.  */
+
+	mr	r5,r10
+	addi	r8,r8,8
+
+	/* r5 has the output of the cmpb instruction, that is, it contains
+	   0xff in the same position as the null byte in the original
+	   doubleword from the string.  Use that to calculate the pointer.  */
+L(done_null):
+#ifdef __LITTLE_ENDIAN__
+	addi	r0,r5,-1
+	andc	r0,r0,r5
+	popcntd	r0,r0
+#else
+	cntlzd	r0,r5	      /* Count leading zeros before the match.  */
+#endif
+	srdi	r0,r0,3	      /* Convert trailing zeros to bytes.  */
+	add	r3,r8,r0      /* Return address of the matching null byte.  */
+	blr
+/* Check the first 32B in GPRs and move to vectorized loop.  */
+	.p2align  5
+L(vector1):
+	addi	r3, r8, 8
+	/* Make sure 32B aligned.  */
+	andi.	r10, r3, 31
+	bne	cr0, L(loop_null)
+	vspltisb	v0, 0
+	/* Precompute vbpermq constant.  */
+	vspltisb	v10, 3
+	lvsl	v11, r0, r0
+	vslb	v10, v11, v10
+	li	r5, 16
+	/* Compare 32 bytes in each loop.  */
+L(continue1):
+	lvx	v4, 0, r3
+	lvx	v5, r3, r5
+	vcmpequb	v2, v0, v4
+	vcmpequb	v3, v0, v5
+	vor	v8, v2, v3
+	vcmpequb.	v11, v0, v8
+	addi	r3, r3, 32
+	blt	cr6, L(continue1)
+	addi	r3, r3, -32
+	VBPERMQ(v2, v2, v10)
+	VBPERMQ(v3, v3, v10)
+	/* Shift each component into its correct position for merging.  */
+#ifdef __LITTLE_ENDIAN__
+	vsldoi	v3, v3, v3, 2
+#else
+	vsldoi	v2, v2, v2, 6
+	vsldoi	v3, v3, v3, 4
+#endif
+	/* Merge the results and move to a GPR.  */
+	vor	v4, v3, v2
+	MFVRD(r5, v4)
+#ifdef __LITTLE_ENDIAN__
+	addi	r6, r5, -1
+	andc	r6, r6, r5
+	popcntd	r6, r6
+#else
+	cntlzd	r6, r5  /* Count leading zeros before the match.  */
+#endif
+	add	r3, r3, r6      /* Compute the address of the null byte.  */
+	blr
+END (strrchr)
+weak_alias (strrchr, rindex)
+libc_hidden_builtin_def (strrchr)