[v11,10/29] string: Improve generic stpcpy
Checks
Context: dj/TryBot-apply_patch
Check: success
Description: Patch applied to master at the time it was sent
Commit Message
It follows this strategy:

- Align the destination to a word boundary using byte operations.

- If the source is also word aligned, read a word at a time, check
  for a null byte (using has_zero from string-fzb.h), and write the
  remaining bytes once it is found.

- If the source is not word aligned, align it and loop, merging the
  result of two consecutive reads.  As in the aligned case, check
  for a null byte with has_zero and write the remaining bytes once
  it is found.

Checked on x86_64-linux-gnu, i686-linux-gnu, powerpc64-linux-gnu,
and powerpc-linux-gnu by removing the arch-specific assembly
implementation and disabling multi-arch (it covers both LE and BE
for 64 and 32 bits).
---
string/stpcpy.c | 92 +++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 86 insertions(+), 6 deletions(-)
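Note: has_zero (from string-fzb.h) and extractbyte (from string-misc.h)
used below are glibc-internal helpers.  As a rough little-endian sketch of
the classic bit tricks they build on (the actual headers differ in detail
and also handle big-endian):

#include <stdint.h>
#include <limits.h>

typedef uintptr_t op_t;              /* One machine word, as in memcopy.h.  */

/* Nonzero if WORD contains a zero byte: subtracting 0x01..01 borrows
   through a zero byte, and masking with ~WORD & 0x80..80 filters out
   false positives from bytes of 0x80 and above.  */
static inline int
has_zero (op_t word)
{
  op_t lsb = (op_t) -1 / UCHAR_MAX;  /* 0x0101...01  */
  op_t msb = lsb << (CHAR_BIT - 1);  /* 0x8080...80  */
  return ((word - lsb) & ~word & msb) != 0;
}

/* Byte IDX of WORD in memory order (little-endian form).  */
static inline unsigned char
extractbyte (op_t word, unsigned int idx)
{
  return word >> (idx * CHAR_BIT);
}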
Comments
On Wed, 2023-02-01 at 14:03 -0300, Adhemerval Zanella wrote:
> +static __always_inline char *
> +stpcpy_unaligned_loop (op_t *restrict dst, const op_t *restrict src,
> +                       uintptr_t ofs)
> +{
> +  op_t w2a = *src++;
> +  uintptr_t sh_1 = ofs * CHAR_BIT;
> +  uintptr_t sh_2 = OPSIZ * CHAR_BIT - sh_1;
Hmm, on 64-bit LoongArch, if we "clone" the function 7 times as
stpcpy_unaligned_loop_{1..7} and call them through a switch (ofs) { ... }
construct, we'd be able to use the bytepick.d instruction for MERGE,
saving two instructions per iteration.  But maybe this is going too
far, and I'm not sure whether this "optimization" applies to other
architectures.
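A sketch of the specialization described here (hypothetical, not part of
the patch): cloning the loop per offset turns sh_1/sh_2 into compile-time
constants, so the compiler can fold each MERGE into a single extract
instruction such as bytepick.d.  stpcpy_unaligned_loop is the patch's
function and must stay __always_inline for the constants to propagate:

static char *
stpcpy_unaligned_dispatch (op_t *restrict dst, const op_t *restrict src,
                           uintptr_t ofs)
{
  /* OFS is in [1, OPSIZ - 1]; each case inlines a clone of the loop
     with constant shifts.  */
  switch (ofs)
    {
    case 1: return stpcpy_unaligned_loop (dst, src, 1);
    case 2: return stpcpy_unaligned_loop (dst, src, 2);
    case 3: return stpcpy_unaligned_loop (dst, src, 3);
#if UINTPTR_MAX > 0xffffffff   /* 64-bit words only.  */
    case 4: return stpcpy_unaligned_loop (dst, src, 4);
    case 5: return stpcpy_unaligned_loop (dst, src, 5);
    case 6: return stpcpy_unaligned_loop (dst, src, 6);
    case 7: return stpcpy_unaligned_loop (dst, src, 7);
#endif
    default: __builtin_unreachable ();
    }
}

Each clone pays for itself in code size, so the win depends on how common
unaligned sources are in practice.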
On 2/1/23 07:03, Adhemerval Zanella wrote:
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
r~
On Wed, Feb 1, 2023 at 11:04 AM Adhemerval Zanella
<adhemerval.zanella@linaro.org> wrote:
>
> +static __always_inline char *
> +stpcpy_unaligned_loop (op_t *restrict dst, const op_t *restrict src,
> +                       uintptr_t ofs)
> +{
Is the perf cost of unaligned stores greater than merging?
On 2/1/23 09:47, Noah Goldstein wrote:
>> +static __always_inline char *
>> +stpcpy_unaligned_loop (op_t *restrict dst, const op_t *restrict src,
>> +                       uintptr_t ofs)
>> +{
>
> Is the perf cost of unaligned stores greater than merging?
Generically, yes.  Some hosts have no hardware unaligned stores at all,
without even a fallback to trap-and-emulate, and compiler-generated
unaligned stores there devolve to byte-by-byte operations.
r~
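For reference, the MERGE under discussion comes from glibc's memcopy.h and
combines two aligned words into the word that an unaligned load spanning
them would return; roughly (sketched from memory, details may differ):

#include <endian.h>   /* __BYTE_ORDER  */

/* SH_1 + SH_2 == OPSIZ * CHAR_BIT; the byte order picks the shift
   direction so the result is in memory order.  */
#if __BYTE_ORDER == __LITTLE_ENDIAN
# define MERGE(w0, sh_1, w1, sh_2) (((w0) >> (sh_1)) | ((w1) << (sh_2)))
#else
# define MERGE(w0, sh_1, w1, sh_2) (((w0) << (sh_1)) | ((w1) >> (sh_2)))
#endif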
On 01/02/23 14:29, Xi Ruoyao wrote:
> On Wed, 2023-02-01 at 14:03 -0300, Adhemerval Zanella wrote:
>> +static __always_inline char *
>> +stpcpy_unaligned_loop (op_t *restrict dst, const op_t *restrict src,
>> +                       uintptr_t ofs)
>> +{
>> +  op_t w2a = *src++;
>> +  uintptr_t sh_1 = ofs * CHAR_BIT;
>> +  uintptr_t sh_2 = OPSIZ * CHAR_BIT - sh_1;
>
> Hmm, on 64-bit LoongArch, if we "clone" the function 7 times as
> stpcpy_unaligned_loop_{1..7} and call them through a switch (ofs) { ... }
> construct, we'd be able to use the bytepick.d instruction for MERGE,
> saving two instructions per iteration.  But maybe this is going too
> far, and I'm not sure whether this "optimization" applies to other
> architectures.
I think it should be feasible; I might get back to optimizing the unaligned
loop with this strategy.  But I will need to check whether the compiler
indeed exploits the fact that the shifts are now constants to optimize the
merge.  It also increases the code size slightly: on x86_64 the text size
went from 850 to 1993 bytes, and on LoongArch from 864 to 2200 (something
to consider as well, assuming unaligned strings are equally likely, since
icache pressure would matter).
@@ -15,12 +15,12 @@
    License along with the GNU C Library; if not, see
    <https://www.gnu.org/licenses/>.  */
 
-#ifdef HAVE_CONFIG_H
-# include <config.h>
-#endif
-
 #define NO_MEMPCPY_STPCPY_REDIRECT
 #include <string.h>
+#include <stdint.h>
+#include <memcopy.h>
+#include <string-fzb.h>
+#include <string-misc.h>
 
 #undef __stpcpy
 #undef stpcpy
@@ -29,12 +29,92 @@
 # define STPCPY __stpcpy
 #endif
 
+static __always_inline char *
+write_byte_from_word (op_t *dest, op_t word)
+{
+  char *d = (char *) dest;
+  for (size_t i = 0; i < OPSIZ; i++, ++d)
+    {
+      char c = extractbyte (word, i);
+      *d = c;
+      if (c == '\0')
+        break;
+    }
+  return d;
+}
+
+static __always_inline char *
+stpcpy_aligned_loop (op_t *restrict dst, const op_t *restrict src)
+{
+  op_t word;
+  while (1)
+    {
+      word = *src++;
+      if (has_zero (word))
+        break;
+      *dst++ = word;
+    }
+
+  return write_byte_from_word (dst, word);
+}
+
+static __always_inline char *
+stpcpy_unaligned_loop (op_t *restrict dst, const op_t *restrict src,
+                       uintptr_t ofs)
+{
+  op_t w2a = *src++;
+  uintptr_t sh_1 = ofs * CHAR_BIT;
+  uintptr_t sh_2 = OPSIZ * CHAR_BIT - sh_1;
+
+  op_t w2 = MERGE (w2a, sh_1, (op_t)-1, sh_2);
+  if (!has_zero (w2))
+    {
+      op_t w2b;
+
+      /* Unaligned loop.  The invariant is that W2B, which is "ahead" of the
+         bytes already stored, does not contain the end of the string.
+         Therefore it is safe (and necessary) to read and merge another word
+         until a zero byte is found.  */
+      while (1)
+        {
+          w2b = *src++;
+          w2 = MERGE (w2a, sh_1, w2b, sh_2);
+          /* Check if there is a zero byte in the merged word.  */
+          if (has_zero (w2))
+            goto out;
+          *dst++ = w2;
+          if (has_zero (w2b))
+            break;
+          w2a = w2b;
+        }
+
+      /* Form the final partial word from the remaining bytes of W2B.  */
+      w2 = MERGE (w2b, sh_1, 0, sh_2);
+    }
+
+out:
+  return write_byte_from_word (dst, w2);
+}
+
+
 /* Copy SRC to DEST, returning the address of the terminating '\0' in DEST.  */
 char *
 STPCPY (char *dest, const char *src)
 {
-  size_t len = strlen (src);
-  return memcpy (dest, src, len + 1) + len;
+  /* Copy just a few bytes to make DEST aligned.  */
+  size_t len = (-(uintptr_t) dest) % OPSIZ;
+  for (; len != 0; len--, ++dest)
+    {
+      char c = *src++;
+      *dest = c;
+      if (c == '\0')
+        return dest;
+    }
+
+  /* DEST is now aligned to op_t, SRC may or may not be.  */
+  uintptr_t ofs = (uintptr_t) src % OPSIZ;
+  return ofs == 0 ? stpcpy_aligned_loop ((op_t *) dest, (const op_t *) src)
+                  : stpcpy_unaligned_loop ((op_t *) dest,
+                                           (const op_t *) (src - ofs), ofs);
 }
 weak_alias (__stpcpy, stpcpy)
 libc_hidden_def (__stpcpy)
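A minimal stand-alone sanity check of the stpcpy contract, handy when
exercising a generic build as described in the commit message (not part of
the patch):

#define _GNU_SOURCE 1
#include <assert.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
  char buf[32];
  /* Unlike strcpy, stpcpy returns the address of the '\0' it wrote,
     so calls chain without re-scanning the string.  */
  char *p = stpcpy (buf, "hello, ");
  p = stpcpy (p, "world");
  assert (p == buf + strlen ("hello, world"));
  assert (strcmp (buf, "hello, world") == 0);
  puts (buf);
  return 0;
}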