[v2,5/8] x86: Add SSE2 optimized chacha20

Message ID 20220418120203.3185943-6-adhemerval.zanella@linaro.org
State Superseded
Series Add arc4random support

Checks

Context Check Description
dj/TryBot-apply_patch success Patch applied to master at the time it was sent

Commit Message

Adhemerval Zanella April 18, 2022, 12:02 p.m. UTC
  It adds a vectorized ChaCha20 implementation based on libgcrypt
cipher/chacha20-amd64-ssse3.S.  It replaces ROTATE_SHUF_2 (which
uses pshufb) with ROTATE2, so the implementation only requires
SSE2.
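
In C-intrinsics terms the change is roughly the following (names are
illustrative; the ROTATE2 macro combines the shifted halves with paddb,
which is equivalent to por here because the shifted bit ranges never
overlap):

  #include <emmintrin.h>   /* SSE2 */
  #include <tmmintrin.h>   /* SSSE3, shown only for comparison */

  /* SSE2: rotate each 32-bit lane left by a constant via two shifts.  */
  static inline __m128i
  rotl32_sse2 (__m128i v, int c)
  {
    return _mm_or_si128 (_mm_slli_epi32 (v, c), _mm_srli_epi32 (v, 32 - c));
  }

  /* SSSE3: byte-aligned rotations (16 and 8 bits) can instead use a
     single pshufb with a fixed byte-shuffle mask.  */
  static inline __m128i
  rotl32_by16_ssse3 (__m128i v)
  {
    const __m128i m = _mm_set_epi8 (13, 12, 15, 14, 9, 8, 11, 10,
                                    5, 4, 7, 6, 1, 0, 3, 2);
    return _mm_shuffle_epi8 (v, m);
  }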

As with the generic implementation, the final step that XORs the
keystream with the input is omitted.
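
For reference, this is the step being skipped; arc4random only consumes
raw keystream, so the block routine simply stores it to dst.  A minimal
sketch with illustrative names:

  #include <stddef.h>
  #include <stdint.h>

  /* Full cipher step: XOR the generated 64-byte keystream block into
     the input.  Omitted here and in the generic code.  */
  static void
  chacha20_block_xor (uint8_t *dst, const uint8_t *src,
                      const uint8_t keystream[64])
  {
    for (size_t i = 0; i < 64; i++)
      dst[i] = src[i] ^ keystream[i];
  }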

On a Ryzen 9 5900X it shows the following improvements (using
formatted bench-arc4random data):

GENERIC
Function                                 MB/s
--------------------------------------------------
arc4random [single-thread]               372.95
arc4random_buf(16) [single-thread]       490.35
arc4random_buf(32) [single-thread]       562.70
arc4random_buf(48) [single-thread]       588.75
arc4random_buf(64) [single-thread]       598.09
arc4random_buf(80) [single-thread]       606.79
arc4random_buf(96) [single-thread]       613.47
arc4random_buf(112) [single-thread]      617.11
arc4random_buf(128) [single-thread]      621.27
--------------------------------------------------

SSSE3:
Function                                 MB/s
--------------------------------------------------
arc4random [single-thread]               601.92
arc4random_buf(16) [single-thread]       910.25
arc4random_buf(32) [single-thread]       1198.58
arc4random_buf(48) [single-thread]       1322.32
arc4random_buf(64) [single-thread]       1410.99
arc4random_buf(80) [single-thread]       1438.26
arc4random_buf(96) [single-thread]       1486.95
arc4random_buf(112) [single-thread]      1515.44
arc4random_buf(128) [single-thread]      1536.95
--------------------------------------------------

Checked on x86_64-linux-gnu.
---
 LICENSES                       |   4 +-
 sysdeps/x86_64/Makefile        |   6 +
 sysdeps/x86_64/chacha20-sse2.S | 314 +++++++++++++++++++++++++++++++++
 sysdeps/x86_64/chacha20_arch.h |  38 ++++
 4 files changed, 360 insertions(+), 2 deletions(-)
 create mode 100644 sysdeps/x86_64/chacha20-sse2.S
 create mode 100644 sysdeps/x86_64/chacha20_arch.h
  

Comments

Noah Goldstein April 18, 2022, 3:56 p.m. UTC | #1
On Mon, Apr 18, 2022 at 7:06 AM Adhemerval Zanella via Libc-alpha
<libc-alpha@sourceware.org> wrote:
>
> It adds a vectorized ChaCha20 implementation based on libgcrypt
> cipher/chacha20-amd64-ssse3.S.  It replaces ROTATE_SHUF_2 (which
> uses pshufb) with ROTATE2, so the implementation only requires
> SSE2.
>
> As with the generic implementation, the final step that XORs the
> keystream with the input is omitted.
>
> On a Ryzen 9 5900X it shows the following improvements (using
> formatted bench-arc4random data):
>
> GENERIC
> Function                                 MB/s
> --------------------------------------------------
> arc4random [single-thread]               372.95
> arc4random_buf(16) [single-thread]       490.35
> arc4random_buf(32) [single-thread]       562.70
> arc4random_buf(48) [single-thread]       588.75
> arc4random_buf(64) [single-thread]       598.09
> arc4random_buf(80) [single-thread]       606.79
> arc4random_buf(96) [single-thread]       613.47
> arc4random_buf(112) [single-thread]      617.11
> arc4random_buf(128) [single-thread]      621.27
> --------------------------------------------------
>
> SSSE3:
> Function                                 MB/s
> --------------------------------------------------
> arc4random [single-thread]               601.92
> arc4random_buf(16) [single-thread]       910.25
> arc4random_buf(32) [single-thread]       1198.58
> arc4random_buf(48) [single-thread]       1322.32
> arc4random_buf(64) [single-thread]       1410.99
> arc4random_buf(80) [single-thread]       1438.26
> arc4random_buf(96) [single-thread]       1486.95
> arc4random_buf(112) [single-thread]      1515.44
> arc4random_buf(128) [single-thread]      1536.95
> --------------------------------------------------
>
> Checked on x86_64-linux-gnu.
> ---
>  LICENSES                       |   4 +-
>  sysdeps/x86_64/Makefile        |   6 +
>  sysdeps/x86_64/chacha20-sse2.S | 314 +++++++++++++++++++++++++++++++++
>  sysdeps/x86_64/chacha20_arch.h |  38 ++++
>  4 files changed, 360 insertions(+), 2 deletions(-)
>  create mode 100644 sysdeps/x86_64/chacha20-sse2.S
>  create mode 100644 sysdeps/x86_64/chacha20_arch.h
>
> diff --git a/LICENSES b/LICENSES
> index 7288d281dc..415991e208 100644
> --- a/LICENSES
> +++ b/LICENSES
> @@ -390,8 +390,8 @@ Copyright 2001 by Stephen L. Moshier <moshier@na-net.ornl.gov>
>   License along with this library; if not, see
>   <https://www.gnu.org/licenses/>.  */
>
> -sysdeps/aarch64/chacha20.S imports code from libgcrypt, with the
> -following notices:
> +sysdeps/aarch64/chacha20.S and sysdeps/x86_64/chacha20-sse2.S
> +import code from libgcrypt, with the following notices:
>
>  Copyright (C) 2017-2019 Jussi Kivilinna <jussi.kivilinna@iki.fi>
>
> diff --git a/sysdeps/x86_64/Makefile b/sysdeps/x86_64/Makefile
> index 79365aff2a..c8fbc30857 100644
> --- a/sysdeps/x86_64/Makefile
> +++ b/sysdeps/x86_64/Makefile
> @@ -5,6 +5,12 @@ ifeq ($(subdir),csu)
>  gen-as-const-headers += link-defines.sym
>  endif
>
> +ifeq ($(subdir),stdlib)
> +sysdep_routines += \
> +  chacha20-sse2 \
> +  # sysdep_routines
> +endif
> +
>  ifeq ($(subdir),gmon)
>  sysdep_routines += _mcount
>  # We cannot compile _mcount.S with -pg because that would create
> diff --git a/sysdeps/x86_64/chacha20-sse2.S b/sysdeps/x86_64/chacha20-sse2.S
> new file mode 100644
> index 0000000000..7e39f3be9f
> --- /dev/null
> +++ b/sysdeps/x86_64/chacha20-sse2.S
> @@ -0,0 +1,314 @@
> +/* Optimized SSE2 implementation of ChaCha20 cipher.
> +   Copyright (C) 2022 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <https://www.gnu.org/licenses/>.  */
> +
> +/* Based on D. J. Bernstein reference implementation at
> +   http://cr.yp.to/chacha.html:
> +
> +   chacha-regs.c version 20080118
> +   D. J. Bernstein
> +   Public domain.  */
> +
> +#include <sysdep.h>
> +
> +#ifdef PIC
> +#  define rRIP (%rip)
> +#else
> +#  define rRIP
> +#endif
> +
> +/* 'ret' instruction replacement for straight-line speculation mitigation */
> +#define ret_spec_stop \
> +        ret; int3;
> +
> +/* register macros */
> +#define INPUT %rdi
> +#define DST   %rsi
> +#define SRC   %rdx
> +#define NBLKS %rcx
> +#define ROUND %eax
> +
> +/* stack structure */
> +#define STACK_VEC_X12 (16)
> +#define STACK_VEC_X13 (16 + STACK_VEC_X12)
> +#define STACK_TMP     (16 + STACK_VEC_X13)
> +#define STACK_TMP1    (16 + STACK_TMP)
> +#define STACK_TMP2    (16 + STACK_TMP1)
> +
> +#define STACK_MAX     (16 + STACK_TMP2)
> +
> +/* vector registers */
> +#define X0 %xmm0
> +#define X1 %xmm1
> +#define X2 %xmm2
> +#define X3 %xmm3
> +#define X4 %xmm4
> +#define X5 %xmm5
> +#define X6 %xmm6
> +#define X7 %xmm7
> +#define X8 %xmm8
> +#define X9 %xmm9
> +#define X10 %xmm10
> +#define X11 %xmm11
> +#define X12 %xmm12
> +#define X13 %xmm13
> +#define X14 %xmm14
> +#define X15 %xmm15
> +
> +/**********************************************************************
> +  helper macros
> + **********************************************************************/
> +
> +/* 4x4 32-bit integer matrix transpose */
> +#define transpose_4x4(x0, x1, x2, x3, t1, t2, t3) \
> +       movdqa    x0, t2; \
> +       punpckhdq x1, t2; \
> +       punpckldq x1, x0; \
> +       \
> +       movdqa    x2, t1; \
> +       punpckldq x3, t1; \
> +       punpckhdq x3, x2; \
> +       \
> +       movdqa     x0, x1; \
> +       punpckhqdq t1, x1; \
> +       punpcklqdq t1, x0; \
> +       \
> +       movdqa     t2, x3; \
> +       punpckhqdq x2, x3; \
> +       punpcklqdq x2, t2; \
> +       movdqa     t2, x2;
> +
> +/* fill xmm register with 32-bit value from memory */
> +#define pbroadcastd(mem32, xreg) \
> +       movd mem32, xreg; \
> +       pshufd $0, xreg, xreg;
> +
> +#define clear(x) pxor x,x;
> +
> +/**********************************************************************
> +  4-way chacha20
> + **********************************************************************/
> +
> +#define ROTATE2(v1,v2,c,tmp1,tmp2)     \
> +       movdqa v1, tmp1;                \
> +       movdqa v2, tmp2;                \
> +       psrld $(32 - (c)), v1;          \
> +       pslld $(c), tmp1;               \
> +       paddb tmp1, v1;                 \
> +       psrld $(32 - (c)), v2;          \
> +       pslld $(c), tmp2;               \
> +       paddb tmp2, v2;
> +
> +#define XOR(ds,s) \
> +       pxor s, ds;
> +
> +#define PLUS(ds,s) \
> +       paddd s, ds;
> +
> +#define QUARTERROUND2(a1,b1,c1,d1,a2,b2,c2,d2,ign,tmp1,tmp2,\
> +                     interleave_op1,interleave_op2)            \
> +               interleave_op1;                                 \
> +       PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2);       \
> +           ROTATE2(d1, d2, 16, tmp1, tmp2);                    \
> +       PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2);       \
> +           ROTATE2(b1, b2, 12, tmp1, tmp2);                    \
> +               interleave_op2;                                 \
> +       PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2);       \
> +           ROTATE2(d1, d2, 8, tmp1, tmp2);                     \
> +       PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2);       \
> +           ROTATE2(b1, b2,  7, tmp1, tmp2);
> +
> +       .section .text.sse2,"ax",@progbits
> +
> +chacha20_data:
> +       .align 16
> +L(counter1):
> +       .long 1,0,0,0
> +L(inc_counter):
> +       .long 0,1,2,3
> +L(unsigned_cmp):
> +       .long 0x80000000,0x80000000,0x80000000,0x80000000
> +
> +       .hidden __chacha20_sse2_blocks4
> +ENTRY (__chacha20_sse2_blocks4)
> +       /* input:
> +        *      %rdi: input
> +        *      %rsi: dst
> +        *      %rdx: src
> +        *      %rcx: nblks (multiple of 4)
> +        */
> +
> +       pushq %rbp;
> +       cfi_adjust_cfa_offset(8);
> +       cfi_rel_offset(rbp, 0)
> +       movq %rsp, %rbp;
> +       cfi_def_cfa_register(%rbp);
> +
> +       subq $STACK_MAX, %rsp;
> +       andq $~15, %rsp;
> +
> +L(loop4):
> +       mov $20, ROUND;
> +
> +       /* Construct counter vectors X12 and X13 */
> +       movdqa L(inc_counter) rRIP, X0;
> +       movdqa L(unsigned_cmp) rRIP, X2;
> +       pbroadcastd((12 * 4)(INPUT), X12);
> +       pbroadcastd((13 * 4)(INPUT), X13);
> +       paddd X0, X12;
> +       movdqa X12, X1;
> +       pxor X2, X0;
> +       pxor X2, X1;
> +       pcmpgtd X1, X0;
> +       psubd X0, X13;
> +       movdqa X12, (STACK_VEC_X12)(%rsp);
> +       movdqa X13, (STACK_VEC_X13)(%rsp);
> +
> +       /* Load vectors */
> +       pbroadcastd((0 * 4)(INPUT), X0);
> +       pbroadcastd((1 * 4)(INPUT), X1);
> +       pbroadcastd((2 * 4)(INPUT), X2);
> +       pbroadcastd((3 * 4)(INPUT), X3);
> +       pbroadcastd((4 * 4)(INPUT), X4);
> +       pbroadcastd((5 * 4)(INPUT), X5);
> +       pbroadcastd((6 * 4)(INPUT), X6);
> +       pbroadcastd((7 * 4)(INPUT), X7);
> +       pbroadcastd((8 * 4)(INPUT), X8);
> +       pbroadcastd((9 * 4)(INPUT), X9);
> +       pbroadcastd((10 * 4)(INPUT), X10);
> +       pbroadcastd((11 * 4)(INPUT), X11);
> +       pbroadcastd((14 * 4)(INPUT), X14);
> +       pbroadcastd((15 * 4)(INPUT), X15);

nit: personally would prefer 'pbroadcastd' to be upcased. It
looks more like an instruction than a macro as it is now.
> +       movdqa X11, (STACK_TMP)(%rsp);
> +       movdqa X15, (STACK_TMP1)(%rsp);
> +
> +L(round2_4):
> +       QUARTERROUND2(X0, X4,  X8, X12,   X1, X5,  X9, X13, tmp:=,X11,X15,,)
> +       movdqa (STACK_TMP)(%rsp), X11;
> +       movdqa (STACK_TMP1)(%rsp), X15;
> +       movdqa X8, (STACK_TMP)(%rsp);
> +       movdqa X9, (STACK_TMP1)(%rsp);
> +       QUARTERROUND2(X2, X6, X10, X14,   X3, X7, X11, X15, tmp:=,X8,X9,,)
> +       QUARTERROUND2(X0, X5, X10, X15,   X1, X6, X11, X12, tmp:=,X8,X9,,)
> +       movdqa (STACK_TMP)(%rsp), X8;
> +       movdqa (STACK_TMP1)(%rsp), X9;
> +       movdqa X11, (STACK_TMP)(%rsp);
> +       movdqa X15, (STACK_TMP1)(%rsp);
> +       QUARTERROUND2(X2, X7,  X8, X13,   X3, X4,  X9, X14, tmp:=,X11,X15,,)
> +       sub $2, ROUND;
> +       jnz .Lround2_4;

Can you make this L(round2_4)?
> +
> +       /* tmp := X15 */
> +       movdqa (STACK_TMP)(%rsp), X11;
> +       pbroadcastd((0 * 4)(INPUT), X15);
> +       PLUS(X0, X15);
> +       pbroadcastd((1 * 4)(INPUT), X15);
> +       PLUS(X1, X15);
> +       pbroadcastd((2 * 4)(INPUT), X15);
> +       PLUS(X2, X15);
> +       pbroadcastd((3 * 4)(INPUT), X15);
> +       PLUS(X3, X15);
> +       pbroadcastd((4 * 4)(INPUT), X15);
> +       PLUS(X4, X15);
> +       pbroadcastd((5 * 4)(INPUT), X15);
> +       PLUS(X5, X15);
> +       pbroadcastd((6 * 4)(INPUT), X15);
> +       PLUS(X6, X15);
> +       pbroadcastd((7 * 4)(INPUT), X15);
> +       PLUS(X7, X15);
> +       pbroadcastd((8 * 4)(INPUT), X15);
> +       PLUS(X8, X15);
> +       pbroadcastd((9 * 4)(INPUT), X15);
> +       PLUS(X9, X15);
> +       pbroadcastd((10 * 4)(INPUT), X15);
> +       PLUS(X10, X15);
> +       pbroadcastd((11 * 4)(INPUT), X15);
> +       PLUS(X11, X15);
> +       movdqa (STACK_VEC_X12)(%rsp), X15;
> +       PLUS(X12, X15);
> +       movdqa (STACK_VEC_X13)(%rsp), X15;
> +       PLUS(X13, X15);
> +       movdqa X13, (STACK_TMP)(%rsp);
> +       pbroadcastd((14 * 4)(INPUT), X15);
> +       PLUS(X14, X15);
> +       movdqa (STACK_TMP1)(%rsp), X15;
> +       movdqa X14, (STACK_TMP1)(%rsp);
> +       pbroadcastd((15 * 4)(INPUT), X13);
> +       PLUS(X15, X13);
> +       movdqa X15, (STACK_TMP2)(%rsp);
> +
> +       /* Update counter */
> +       addq $4, (12 * 4)(INPUT);
> +
> +       transpose_4x4(X0, X1, X2, X3, X13, X14, X15);
> +       movdqu X0, (64 * 0 + 16 * 0)(DST)
> +       movdqu X1, (64 * 1 + 16 * 0)(DST)
> +       movdqu X2, (64 * 2 + 16 * 0)(DST)
> +       movdqu X3, (64 * 3 + 16 * 0)(DST)
> +       transpose_4x4(X4, X5, X6, X7, X0, X1, X2);
> +       movdqa (STACK_TMP)(%rsp), X13;
> +       movdqa (STACK_TMP1)(%rsp), X14;
> +       movdqa (STACK_TMP2)(%rsp), X15;
> +       movdqu X4, (64 * 0 + 16 * 1)(DST)
> +       movdqu X5, (64 * 1 + 16 * 1)(DST)
> +       movdqu X6, (64 * 2 + 16 * 1)(DST)
> +       movdqu X7, (64 * 3 + 16 * 1)(DST)
> +       transpose_4x4(X8, X9, X10, X11, X0, X1, X2);
> +       movdqu X8,  (64 * 0 + 16 * 2)(DST)
> +       movdqu X9,  (64 * 1 + 16 * 2)(DST)
> +       movdqu X10, (64 * 2 + 16 * 2)(DST)
> +       movdqu X11, (64 * 3 + 16 * 2)(DST)
> +       transpose_4x4(X12, X13, X14, X15, X0, X1, X2);
> +       movdqu X12, (64 * 0 + 16 * 3)(DST)
> +       movdqu X13, (64 * 1 + 16 * 3)(DST)
> +       movdqu X14, (64 * 2 + 16 * 3)(DST)
> +       movdqu X15, (64 * 3 + 16 * 3)(DST)
> +
> +       sub $4, NBLKS;
> +       lea (4 * 64)(DST), DST;
> +       lea (4 * 64)(SRC), SRC;
> +       jnz L(loop4);
> +
> +       /* clear the used vector registers and stack */
> +       clear(X0);
> +       movdqa X0, (STACK_VEC_X12)(%rsp);
> +       movdqa X0, (STACK_VEC_X13)(%rsp);
> +       movdqa X0, (STACK_TMP)(%rsp);
> +       movdqa X0, (STACK_TMP1)(%rsp);
> +       movdqa X0, (STACK_TMP2)(%rsp);
> +       clear(X1);
> +       clear(X2);
> +       clear(X3);
> +       clear(X4);
> +       clear(X5);
> +       clear(X6);
> +       clear(X7);
> +       clear(X8);
> +       clear(X9);
> +       clear(X10);
> +       clear(X11);
> +       clear(X12);
> +       clear(X13);
> +       clear(X14);
> +       clear(X15);
> +
> +       /* eax zeroed by round loop. */
> +       leave;
> +       cfi_adjust_cfa_offset(-8)
> +       cfi_def_cfa_register(%rsp);
> +       ret_spec_stop;
> +END (__chacha20_sse2_blocks4)
> diff --git a/sysdeps/x86_64/chacha20_arch.h b/sysdeps/x86_64/chacha20_arch.h
> new file mode 100644
> index 0000000000..6fe5f77889
> --- /dev/null
> +++ b/sysdeps/x86_64/chacha20_arch.h
> @@ -0,0 +1,38 @@
> +/* Chacha20 implementation, used on arc4random.
> +   Copyright (C) 2022 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <http://www.gnu.org/licenses/>.  */
> +
> +#include <ldsodefs.h>
> +#include <cpu-features.h>
> +#include <sys/param.h>
> +
> +unsigned int __chacha20_sse2_blocks4 (uint32_t *state, uint8_t *dst,
> +                                     const uint8_t *src, size_t nblks)
> +     attribute_hidden;
> +
> +static inline void
> +chacha20_crypt (struct chacha20_state *state, uint8_t *dst, const uint8_t *src,
> +               size_t bytes)
> +{
> +  _Static_assert (CHACHA20_BUFSIZE % 4 == 0,
> +                 "CHACHA20_BUFSIZE not multiple of 4");
> +  _Static_assert (CHACHA20_BUFSIZE > CHACHA20_BLOCK_SIZE * 8,
> +                 "CHACHA20_BUFSIZE <= CHACHA20_BLOCK_SIZE * 8");
> +
> +  __chacha20_sse2_blocks4 (state->ctx, dst, src,
> +                          CHACHA20_BUFSIZE / CHACHA20_BLOCK_SIZE);
> +}
> --
> 2.32.0
>

LGTM as a first draft but some things we should look into in the future:

1. Optimizing the code
2. Using an ifunc to select an optimized chacha20 version (rough sketch below)
3. Removing the clears at the end
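
A rough sketch of what item 2 could look like (the AVX2 symbol is
hypothetical, and glibc's internal resolvers would use CPU_FEATURE_USABLE
from <cpu-features.h> rather than __builtin_cpu_supports):

  #include <stddef.h>
  #include <stdint.h>

  extern unsigned int __chacha20_sse2_blocks4 (uint32_t *, uint8_t *,
                                               const uint8_t *, size_t);
  /* Hypothetical wider variant, not part of this patch.  */
  extern unsigned int __chacha20_avx2_blocks8 (uint32_t *, uint8_t *,
                                               const uint8_t *, size_t);

  typedef unsigned int (*chacha20_blocks_fn) (uint32_t *, uint8_t *,
                                              const uint8_t *, size_t);

  static chacha20_blocks_fn
  chacha20_blocks_resolver (void)
  {
    if (__builtin_cpu_supports ("avx2"))
      return __chacha20_avx2_blocks8;
    return __chacha20_sse2_blocks4;
  }

  unsigned int __chacha20_blocks (uint32_t *, uint8_t *, const uint8_t *,
                                  size_t)
       __attribute__ ((ifunc ("chacha20_blocks_resolver")));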
  

Patch

diff --git a/LICENSES b/LICENSES
index 7288d281dc..415991e208 100644
--- a/LICENSES
+++ b/LICENSES
@@ -390,8 +390,8 @@  Copyright 2001 by Stephen L. Moshier <moshier@na-net.ornl.gov>
  License along with this library; if not, see
  <https://www.gnu.org/licenses/>.  */
 
-sysdeps/aarch64/chacha20.S imports code from libgcrypt, with the
-following notices:
+sysdeps/aarch64/chacha20.S and sysdeps/x86_64/chacha20-sse2.S
+import code from libgcrypt, with the following notices:
 
 Copyright (C) 2017-2019 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 
diff --git a/sysdeps/x86_64/Makefile b/sysdeps/x86_64/Makefile
index 79365aff2a..c8fbc30857 100644
--- a/sysdeps/x86_64/Makefile
+++ b/sysdeps/x86_64/Makefile
@@ -5,6 +5,12 @@  ifeq ($(subdir),csu)
 gen-as-const-headers += link-defines.sym
 endif
 
+ifeq ($(subdir),stdlib)
+sysdep_routines += \
+  chacha20-sse2 \
+  # sysdep_routines
+endif
+
 ifeq ($(subdir),gmon)
 sysdep_routines += _mcount
 # We cannot compile _mcount.S with -pg because that would create
diff --git a/sysdeps/x86_64/chacha20-sse2.S b/sysdeps/x86_64/chacha20-sse2.S
new file mode 100644
index 0000000000..7e39f3be9f
--- /dev/null
+++ b/sysdeps/x86_64/chacha20-sse2.S
@@ -0,0 +1,314 @@ 
+/* Optimized SSE2 implementation of ChaCha20 cipher.
+   Copyright (C) 2022 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+/* Based on D. J. Bernstein reference implementation at
+   http://cr.yp.to/chacha.html:
+
+   chacha-regs.c version 20080118
+   D. J. Bernstein
+   Public domain.  */
+
+#include <sysdep.h>
+
+#ifdef PIC
+#  define rRIP (%rip)
+#else
+#  define rRIP
+#endif
+
+/* 'ret' instruction replacement for straight-line speculation mitigation */
+#define ret_spec_stop \
+        ret; int3;
+
+/* register macros */
+#define INPUT %rdi
+#define DST   %rsi
+#define SRC   %rdx
+#define NBLKS %rcx
+#define ROUND %eax
+
+/* stack structure */
+#define STACK_VEC_X12 (16)
+#define STACK_VEC_X13 (16 + STACK_VEC_X12)
+#define STACK_TMP     (16 + STACK_VEC_X13)
+#define STACK_TMP1    (16 + STACK_TMP)
+#define STACK_TMP2    (16 + STACK_TMP1)
+
+#define STACK_MAX     (16 + STACK_TMP2)
+
+/* vector registers */
+#define X0 %xmm0
+#define X1 %xmm1
+#define X2 %xmm2
+#define X3 %xmm3
+#define X4 %xmm4
+#define X5 %xmm5
+#define X6 %xmm6
+#define X7 %xmm7
+#define X8 %xmm8
+#define X9 %xmm9
+#define X10 %xmm10
+#define X11 %xmm11
+#define X12 %xmm12
+#define X13 %xmm13
+#define X14 %xmm14
+#define X15 %xmm15
+
+/**********************************************************************
+  helper macros
+ **********************************************************************/
+
+/* 4x4 32-bit integer matrix transpose */
+#define transpose_4x4(x0, x1, x2, x3, t1, t2, t3) \
+	movdqa    x0, t2; \
+	punpckhdq x1, t2; \
+	punpckldq x1, x0; \
+	\
+	movdqa    x2, t1; \
+	punpckldq x3, t1; \
+	punpckhdq x3, x2; \
+	\
+	movdqa     x0, x1; \
+	punpckhqdq t1, x1; \
+	punpcklqdq t1, x0; \
+	\
+	movdqa     t2, x3; \
+	punpckhqdq x2, x3; \
+	punpcklqdq x2, t2; \
+	movdqa     t2, x2;
+
+/* fill xmm register with 32-bit value from memory */
+#define pbroadcastd(mem32, xreg) \
+	movd mem32, xreg; \
+	pshufd $0, xreg, xreg;
+
+#define clear(x) pxor x,x;
+
+/**********************************************************************
+  4-way chacha20
+ **********************************************************************/
+
+#define ROTATE2(v1,v2,c,tmp1,tmp2)	\
+	movdqa v1, tmp1; 		\
+	movdqa v2, tmp2; 		\
+	psrld $(32 - (c)), v1;		\
+	pslld $(c), tmp1;		\
+	paddb tmp1, v1;			\
+	psrld $(32 - (c)), v2;		\
+	pslld $(c), tmp2;		\
+	paddb tmp2, v2;
+
+#define XOR(ds,s) \
+	pxor s, ds;
+
+#define PLUS(ds,s) \
+	paddd s, ds;
+
+#define QUARTERROUND2(a1,b1,c1,d1,a2,b2,c2,d2,ign,tmp1,tmp2,\
+		      interleave_op1,interleave_op2)		\
+		interleave_op1;					\
+	PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2);	\
+	    ROTATE2(d1, d2, 16, tmp1, tmp2);			\
+	PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2);	\
+	    ROTATE2(b1, b2, 12, tmp1, tmp2);			\
+		interleave_op2;					\
+	PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2);	\
+	    ROTATE2(d1, d2, 8, tmp1, tmp2);			\
+	PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2);	\
+	    ROTATE2(b1, b2,  7, tmp1, tmp2);
+
+	.section .text.sse2,"ax",@progbits
+
+chacha20_data:
+	.align 16
+L(counter1):
+	.long 1,0,0,0
+L(inc_counter):
+	.long 0,1,2,3
+L(unsigned_cmp):
+	.long 0x80000000,0x80000000,0x80000000,0x80000000
+
+	.hidden __chacha20_sse2_blocks4
+ENTRY (__chacha20_sse2_blocks4)
+	/* input:
+	 *	%rdi: input
+	 *	%rsi: dst
+	 *	%rdx: src
+	 *	%rcx: nblks (multiple of 4)
+	 */
+
+	pushq %rbp;
+	cfi_adjust_cfa_offset(8);
+	cfi_rel_offset(rbp, 0)
+	movq %rsp, %rbp;
+	cfi_def_cfa_register(%rbp);
+
+	subq $STACK_MAX, %rsp;
+	andq $~15, %rsp;
+
+L(loop4):
+	mov $20, ROUND;
+
+	/* Construct counter vectors X12 and X13 */
+	movdqa L(inc_counter) rRIP, X0;
+	movdqa L(unsigned_cmp) rRIP, X2;
+	pbroadcastd((12 * 4)(INPUT), X12);
+	pbroadcastd((13 * 4)(INPUT), X13);
+	paddd X0, X12;
+	movdqa X12, X1;
+	pxor X2, X0;
+	pxor X2, X1;
+	pcmpgtd X1, X0;
+	psubd X0, X13;
+	movdqa X12, (STACK_VEC_X12)(%rsp);
+	movdqa X13, (STACK_VEC_X13)(%rsp);
+
+	/* Load vectors */
+	pbroadcastd((0 * 4)(INPUT), X0);
+	pbroadcastd((1 * 4)(INPUT), X1);
+	pbroadcastd((2 * 4)(INPUT), X2);
+	pbroadcastd((3 * 4)(INPUT), X3);
+	pbroadcastd((4 * 4)(INPUT), X4);
+	pbroadcastd((5 * 4)(INPUT), X5);
+	pbroadcastd((6 * 4)(INPUT), X6);
+	pbroadcastd((7 * 4)(INPUT), X7);
+	pbroadcastd((8 * 4)(INPUT), X8);
+	pbroadcastd((9 * 4)(INPUT), X9);
+	pbroadcastd((10 * 4)(INPUT), X10);
+	pbroadcastd((11 * 4)(INPUT), X11);
+	pbroadcastd((14 * 4)(INPUT), X14);
+	pbroadcastd((15 * 4)(INPUT), X15);
+	movdqa X11, (STACK_TMP)(%rsp);
+	movdqa X15, (STACK_TMP1)(%rsp);
+
+L(round2_4):
+	QUARTERROUND2(X0, X4,  X8, X12,   X1, X5,  X9, X13, tmp:=,X11,X15,,)
+	movdqa (STACK_TMP)(%rsp), X11;
+	movdqa (STACK_TMP1)(%rsp), X15;
+	movdqa X8, (STACK_TMP)(%rsp);
+	movdqa X9, (STACK_TMP1)(%rsp);
+	QUARTERROUND2(X2, X6, X10, X14,   X3, X7, X11, X15, tmp:=,X8,X9,,)
+	QUARTERROUND2(X0, X5, X10, X15,   X1, X6, X11, X12, tmp:=,X8,X9,,)
+	movdqa (STACK_TMP)(%rsp), X8;
+	movdqa (STACK_TMP1)(%rsp), X9;
+	movdqa X11, (STACK_TMP)(%rsp);
+	movdqa X15, (STACK_TMP1)(%rsp);
+	QUARTERROUND2(X2, X7,  X8, X13,   X3, X4,  X9, X14, tmp:=,X11,X15,,)
+	sub $2, ROUND;
+	jnz .Lround2_4;
+
+	/* tmp := X15 */
+	movdqa (STACK_TMP)(%rsp), X11;
+	pbroadcastd((0 * 4)(INPUT), X15);
+	PLUS(X0, X15);
+	pbroadcastd((1 * 4)(INPUT), X15);
+	PLUS(X1, X15);
+	pbroadcastd((2 * 4)(INPUT), X15);
+	PLUS(X2, X15);
+	pbroadcastd((3 * 4)(INPUT), X15);
+	PLUS(X3, X15);
+	pbroadcastd((4 * 4)(INPUT), X15);
+	PLUS(X4, X15);
+	pbroadcastd((5 * 4)(INPUT), X15);
+	PLUS(X5, X15);
+	pbroadcastd((6 * 4)(INPUT), X15);
+	PLUS(X6, X15);
+	pbroadcastd((7 * 4)(INPUT), X15);
+	PLUS(X7, X15);
+	pbroadcastd((8 * 4)(INPUT), X15);
+	PLUS(X8, X15);
+	pbroadcastd((9 * 4)(INPUT), X15);
+	PLUS(X9, X15);
+	pbroadcastd((10 * 4)(INPUT), X15);
+	PLUS(X10, X15);
+	pbroadcastd((11 * 4)(INPUT), X15);
+	PLUS(X11, X15);
+	movdqa (STACK_VEC_X12)(%rsp), X15;
+	PLUS(X12, X15);
+	movdqa (STACK_VEC_X13)(%rsp), X15;
+	PLUS(X13, X15);
+	movdqa X13, (STACK_TMP)(%rsp);
+	pbroadcastd((14 * 4)(INPUT), X15);
+	PLUS(X14, X15);
+	movdqa (STACK_TMP1)(%rsp), X15;
+	movdqa X14, (STACK_TMP1)(%rsp);
+	pbroadcastd((15 * 4)(INPUT), X13);
+	PLUS(X15, X13);
+	movdqa X15, (STACK_TMP2)(%rsp);
+
+	/* Update counter */
+	addq $4, (12 * 4)(INPUT);
+
+	transpose_4x4(X0, X1, X2, X3, X13, X14, X15);
+	movdqu X0, (64 * 0 + 16 * 0)(DST)
+	movdqu X1, (64 * 1 + 16 * 0)(DST)
+	movdqu X2, (64 * 2 + 16 * 0)(DST)
+	movdqu X3, (64 * 3 + 16 * 0)(DST)
+	transpose_4x4(X4, X5, X6, X7, X0, X1, X2);
+	movdqa (STACK_TMP)(%rsp), X13;
+	movdqa (STACK_TMP1)(%rsp), X14;
+	movdqa (STACK_TMP2)(%rsp), X15;
+	movdqu X4, (64 * 0 + 16 * 1)(DST)
+	movdqu X5, (64 * 1 + 16 * 1)(DST)
+	movdqu X6, (64 * 2 + 16 * 1)(DST)
+	movdqu X7, (64 * 3 + 16 * 1)(DST)
+	transpose_4x4(X8, X9, X10, X11, X0, X1, X2);
+	movdqu X8,  (64 * 0 + 16 * 2)(DST)
+	movdqu X9,  (64 * 1 + 16 * 2)(DST)
+	movdqu X10, (64 * 2 + 16 * 2)(DST)
+	movdqu X11, (64 * 3 + 16 * 2)(DST)
+	transpose_4x4(X12, X13, X14, X15, X0, X1, X2);
+	movdqu X12, (64 * 0 + 16 * 3)(DST)
+	movdqu X13, (64 * 1 + 16 * 3)(DST)
+	movdqu X14, (64 * 2 + 16 * 3)(DST)
+	movdqu X15, (64 * 3 + 16 * 3)(DST)
+
+	sub $4, NBLKS;
+	lea (4 * 64)(DST), DST;
+	lea (4 * 64)(SRC), SRC;
+	jnz L(loop4);
+
+	/* clear the used vector registers and stack */
+	clear(X0);
+	movdqa X0, (STACK_VEC_X12)(%rsp);
+	movdqa X0, (STACK_VEC_X13)(%rsp);
+	movdqa X0, (STACK_TMP)(%rsp);
+	movdqa X0, (STACK_TMP1)(%rsp);
+	movdqa X0, (STACK_TMP2)(%rsp);
+	clear(X1);
+	clear(X2);
+	clear(X3);
+	clear(X4);
+	clear(X5);
+	clear(X6);
+	clear(X7);
+	clear(X8);
+	clear(X9);
+	clear(X10);
+	clear(X11);
+	clear(X12);
+	clear(X13);
+	clear(X14);
+	clear(X15);
+
+	/* eax zeroed by round loop. */
+	leave;
+	cfi_adjust_cfa_offset(-8)
+	cfi_def_cfa_register(%rsp);
+	ret_spec_stop;
+END (__chacha20_sse2_blocks4)
diff --git a/sysdeps/x86_64/chacha20_arch.h b/sysdeps/x86_64/chacha20_arch.h
new file mode 100644
index 0000000000..6fe5f77889
--- /dev/null
+++ b/sysdeps/x86_64/chacha20_arch.h
@@ -0,0 +1,38 @@ 
+/* Chacha20 implementation, used on arc4random.
+   Copyright (C) 2022 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <ldsodefs.h>
+#include <cpu-features.h>
+#include <sys/param.h>
+
+unsigned int __chacha20_sse2_blocks4 (uint32_t *state, uint8_t *dst,
+				      const uint8_t *src, size_t nblks)
+     attribute_hidden;
+
+static inline void
+chacha20_crypt (struct chacha20_state *state, uint8_t *dst, const uint8_t *src,
+		size_t bytes)
+{
+  _Static_assert (CHACHA20_BUFSIZE % 4 == 0,
+		  "CHACHA20_BUFSIZE not multiple of 4");
+  _Static_assert (CHACHA20_BUFSIZE > CHACHA20_BLOCK_SIZE * 8,
+		  "CHACHA20_BUFSIZE <= CHACHA20_BLOCK_SIZE * 8");
+
+  __chacha20_sse2_blocks4 (state->ctx, dst, src,
+			   CHACHA20_BUFSIZE / CHACHA20_BLOCK_SIZE);
+}