From patchwork Tue Jun 28 15:27:56 2022
X-Patchwork-Submitter: Noah Goldstein
X-Patchwork-Id: 55503
From: Noah Goldstein
To: libc-alpha@sourceware.org
Subject: [PATCH v1 1/2] x86: Move mem{p}{mov|cpy}_{chk_}erms to its own file
Date: Tue, 28 Jun 2022 08:27:56 -0700
Message-Id: <20220628152757.17922-1-goldstein.w.n@gmail.com>
X-Mailer: git-send-email 2.34.1

The primary memmove_{impl}_unaligned_erms implementations don't
interact with this function.  Putting them in the same file both
wastes space and unnecessarily bloats a hot code section.
---
 sysdeps/x86_64/multiarch/memmove-erms.S       | 53 +++++++++++++++++++
 .../multiarch/memmove-vec-unaligned-erms.S    | 50 -----------------
 2 files changed, 53 insertions(+), 50 deletions(-)
 create mode 100644 sysdeps/x86_64/multiarch/memmove-erms.S

diff --git a/sysdeps/x86_64/multiarch/memmove-erms.S b/sysdeps/x86_64/multiarch/memmove-erms.S
new file mode 100644
index 0000000000..d98d21644b
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/memmove-erms.S
@@ -0,0 +1,53 @@
+#include <sysdep.h>
+
+#if defined USE_MULTIARCH && IS_IN (libc)
+	.text
+ENTRY (__mempcpy_chk_erms)
+	cmp	%RDX_LP, %RCX_LP
+	jb	HIDDEN_JUMPTARGET (__chk_fail)
+END (__mempcpy_chk_erms)
+
+/* Only used to measure performance of REP MOVSB.  */
+ENTRY (__mempcpy_erms)
+	mov	%RDI_LP, %RAX_LP
+	/* Skip zero length.  */
+	test	%RDX_LP, %RDX_LP
+	jz	2f
+	add	%RDX_LP, %RAX_LP
+	jmp	L(start_movsb)
+END (__mempcpy_erms)
+
+ENTRY (__memmove_chk_erms)
+	cmp	%RDX_LP, %RCX_LP
+	jb	HIDDEN_JUMPTARGET (__chk_fail)
+END (__memmove_chk_erms)
+
+ENTRY (__memmove_erms)
+	movq	%rdi, %rax
+	/* Skip zero length.  */
+	test	%RDX_LP, %RDX_LP
+	jz	2f
+L(start_movsb):
+	mov	%RDX_LP, %RCX_LP
+	cmp	%RSI_LP, %RDI_LP
+	jb	1f
+	/* Source == destination is less common.  */
+	je	2f
+	lea	(%rsi,%rcx), %RDX_LP
+	cmp	%RDX_LP, %RDI_LP
+	jb	L(movsb_backward)
+1:
+	rep	movsb
+2:
+	ret
+L(movsb_backward):
+	leaq	-1(%rdi,%rcx), %rdi
+	leaq	-1(%rsi,%rcx), %rsi
+	std
+	rep	movsb
+	cld
+	ret
+END (__memmove_erms)
+strong_alias (__memmove_erms, __memcpy_erms)
+strong_alias (__memmove_chk_erms, __memcpy_chk_erms)
+#endif
diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
index d1518b8bab..04747133b7 100644
--- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
@@ -239,56 +239,6 @@ L(start):
 #endif
 #if defined USE_MULTIARCH && IS_IN (libc)
 END (MEMMOVE_SYMBOL (__memmove, unaligned))
-# if VEC_SIZE == 16
-ENTRY (__mempcpy_chk_erms)
-	cmp	%RDX_LP, %RCX_LP
-	jb	HIDDEN_JUMPTARGET (__chk_fail)
-END (__mempcpy_chk_erms)
-
-/* Only used to measure performance of REP MOVSB.  */
-ENTRY (__mempcpy_erms)
-	mov	%RDI_LP, %RAX_LP
-	/* Skip zero length.  */
-	test	%RDX_LP, %RDX_LP
-	jz	2f
-	add	%RDX_LP, %RAX_LP
-	jmp	L(start_movsb)
-END (__mempcpy_erms)
-
-ENTRY (__memmove_chk_erms)
-	cmp	%RDX_LP, %RCX_LP
-	jb	HIDDEN_JUMPTARGET (__chk_fail)
-END (__memmove_chk_erms)
-
-ENTRY (__memmove_erms)
-	movq	%rdi, %rax
-	/* Skip zero length.  */
-	test	%RDX_LP, %RDX_LP
-	jz	2f
-L(start_movsb):
-	mov	%RDX_LP, %RCX_LP
-	cmp	%RSI_LP, %RDI_LP
-	jb	1f
-	/* Source == destination is less common.  */
-	je	2f
-	lea	(%rsi,%rcx), %RDX_LP
-	cmp	%RDX_LP, %RDI_LP
-	jb	L(movsb_backward)
-1:
-	rep	movsb
-2:
-	ret
-L(movsb_backward):
-	leaq	-1(%rdi,%rcx), %rdi
-	leaq	-1(%rsi,%rcx), %rsi
-	std
-	rep	movsb
-	cld
-	ret
-END (__memmove_erms)
-strong_alias (__memmove_erms, __memcpy_erms)
-strong_alias (__memmove_chk_erms, __memcpy_chk_erms)
-# endif
 # ifdef SHARED
 ENTRY (MEMMOVE_CHK_SYMBOL (__mempcpy_chk, unaligned_erms))

From patchwork Tue Jun 28 15:27:57 2022
X-Patchwork-Submitter: Noah Goldstein
X-Patchwork-Id: 55505
From: Noah Goldstein
To: libc-alpha@sourceware.org
Subject: [PATCH v1 2/2] x86: Add support for building {w}memmove{_chk} with explicit ISA level
Date: Tue, 28 Jun 2022 08:27:57 -0700
Message-Id: <20220628152757.17922-2-goldstein.w.n@gmail.com>
In-Reply-To: <20220628152757.17922-1-goldstein.w.n@gmail.com>
References: <20220628152757.17922-1-goldstein.w.n@gmail.com>
X-Mailer: git-send-email 2.34.1

1. Refactor files so that all implementations are in the multiarch
   directory.
   - Moved the implementation portion of memmove sse2 from memmove.S
     to multiarch/memmove-sse2.S.
   - The non-multiarch file now only includes one of the
     implementations in the multiarch directory based on the compiled
     ISA level (only used for non-multiarch builds; otherwise we go
     through the ifunc selector).

2. Add ISA level build guards to the different implementations.
   - E.g. memmove-avx-unaligned-erms.S, which is ISA level 3, will
     only be built if the compiled ISA level is <= 3.  Otherwise there
     is no reason to include it, as we will always use one of the ISA
     level 4 implementations (memmove-evex-unaligned-erms.S).  A
     minimal sketch of these guards follows this message.

3. Add a new multiarch/rtld-memmove.S that just includes the
   non-multiarch memmove.S, which will in turn select the best
   implementation based on the compiled ISA level.

4. Refactor the ifunc selector and ifunc implementation list to use
   the ISA-level-aware wrapper macros that allow functions below the
   compiled ISA level (with a guaranteed replacement) to be skipped.

Tested with and without multiarch on x86_64 for ISA levels:
{generic, x86-64-v2, x86-64-v3, x86-64-v4}

And m32 with and without multiarch.
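[Editor's note: a minimal sketch of the ISA-level gating described in
points 2 and 4, for readers unfamiliar with the mechanism.  The names
ISA_SHOULD_BUILD, MINIMUM_X86_ISA_LEVEL and DEFAULT_IMPL_V{1,3,4} come
from the patch below; the simplified definitions here are illustrative
assumptions, not glibc's literal headers.

    /* MINIMUM_X86_ISA_LEVEL is the ISA level the library is compiled
       for: 1 = baseline x86-64, 2 = x86-64-v2, 3 = x86-64-v3,
       4 = x86-64-v4 (assumed to be set by the build system).  */

    /* An implementation written for ISA level N only needs to be
       built when N >= MINIMUM_X86_ISA_LEVEL; below that level it
       would never be selected, because an implementation at a level
       >= MINIMUM_X86_ISA_LEVEL is guaranteed to exist and replace
       it.  */
    #define ISA_SHOULD_BUILD(N) ((N) >= MINIMUM_X86_ISA_LEVEL)

    /* Non-multiarch builds statically pick a single default,
       mirroring the DEFAULT_IMPL_V{1,3,4} selection added to
       memmove.S below.  */
    #if MINIMUM_X86_ISA_LEVEL >= 4
    # define DEFAULT_IMPL "multiarch/memmove-evex-unaligned-erms.S"
    #elif MINIMUM_X86_ISA_LEVEL == 3
    # define DEFAULT_IMPL "multiarch/memmove-avx-unaligned-erms.S"
    #else
    # define DEFAULT_IMPL "multiarch/memmove-sse2-unaligned-erms.S"
    #endif

Note that the sse2 file below nevertheless guards with
ISA_SHOULD_BUILD (2): as its own comment explains, there is no
V2-specific memmove, so the V1 implementation must also be built for
V2 builds.]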
---
 sysdeps/x86_64/memmove.S                      |  34 +--
 sysdeps/x86_64/multiarch/Makefile             |   1 +
 sysdeps/x86_64/multiarch/ifunc-impl-list.c    | 275 ++++++++++--------
 sysdeps/x86_64/multiarch/ifunc-memmove.h      |  47 +--
 .../multiarch/memmove-avx-unaligned-erms.S    |  15 +-
 .../multiarch/memmove-avx512-no-vzeroupper.S  |   3 +-
 .../multiarch/memmove-avx512-unaligned-erms.S |  11 +-
 .../multiarch/memmove-evex-unaligned-erms.S   |  16 +-
 .../x86_64/multiarch/memmove-shlib-compat.h   |  26 ++
 .../multiarch/memmove-sse2-unaligned-erms.S   |  36 ++-
 sysdeps/x86_64/multiarch/memmove-ssse3.S      |  41 +--
 sysdeps/x86_64/multiarch/rtld-memmove.S       |  18 ++
 12 files changed, 327 insertions(+), 196 deletions(-)
 create mode 100644 sysdeps/x86_64/multiarch/memmove-shlib-compat.h
 create mode 100644 sysdeps/x86_64/multiarch/rtld-memmove.S

diff --git a/sysdeps/x86_64/memmove.S b/sysdeps/x86_64/memmove.S
index 78e8d974d9..ef1b2496f4 100644
--- a/sysdeps/x86_64/memmove.S
+++ b/sysdeps/x86_64/memmove.S
@@ -16,17 +16,6 @@
    License along with the GNU C Library; if not, see
    <https://www.gnu.org/licenses/>.  */
 
-#include <sysdep.h>
-
-#define VEC_SIZE	16
-#define VEC(i)		xmm##i
-#define PREFETCHNT	prefetchnta
-#define VMOVNT		movntdq
-/* Use movups and movaps for smaller code sizes.  */
-#define VMOVU		movups
-#define VMOVA		movaps
-#define MOV_SIZE	3
-#define SECTION(p)	p
 
 #ifdef USE_MULTIARCH
 # if !IS_IN (libc)
@@ -42,12 +31,20 @@
 #if !defined USE_MULTIARCH || !IS_IN (libc)
 # define MEMPCPY_SYMBOL(p,s)	__mempcpy
 #endif
-#ifndef MEMMOVE_SYMBOL
-# define MEMMOVE_CHK_SYMBOL(p,s)	p
-# define MEMMOVE_SYMBOL(p,s)	memmove
-#endif
 
-#include "multiarch/memmove-vec-unaligned-erms.S"
+#define MEMMOVE_CHK_SYMBOL(p,s)	p
+#define MEMMOVE_SYMBOL(p,s)	memmove
+
+
+#define DEFAULT_IMPL_V1	"multiarch/memmove-sse2-unaligned-erms.S"
+#define DEFAULT_IMPL_V3	"multiarch/memmove-avx-unaligned-erms.S"
+#define DEFAULT_IMPL_V4	"multiarch/memmove-evex-unaligned-erms.S"
+
+#include "isa-default-impl.h"
+
+#if defined USE_MULTIARCH && !IS_IN (libc)
+weak_alias (__mempcpy, mempcpy)
+#endif
 
 #ifndef USE_MULTIARCH
 libc_hidden_builtin_def (memmove)
@@ -59,13 +56,10 @@ libc_hidden_def (__mempcpy)
 weak_alias (__mempcpy, mempcpy)
 libc_hidden_builtin_def (mempcpy)
 
+
 # if defined SHARED && IS_IN (libc)
 #  undef memcpy
 #  include <shlib-compat.h>
 versioned_symbol (libc, __memcpy, memcpy, GLIBC_2_14);
-
-#  if SHLIB_COMPAT (libc, GLIBC_2_2_5, GLIBC_2_14)
-compat_symbol (libc, memmove, memcpy, GLIBC_2_2_5);
-#  endif
 # endif
 #endif
diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
index 0525cef3fe..18cea04423 100644
--- a/sysdeps/x86_64/multiarch/Makefile
+++ b/sysdeps/x86_64/multiarch/Makefile
@@ -18,6 +18,7 @@ sysdep_routines += \
   memmove-avx-unaligned-erms-rtm \
   memmove-avx512-no-vzeroupper \
   memmove-avx512-unaligned-erms \
+  memmove-erms \
   memmove-evex-unaligned-erms \
   memmove-sse2-unaligned-erms \
   memmove-ssse3 \
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index 4450e2c593..5f7a0aa186 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -109,83 +109,93 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 #ifdef SHARED
   /* Support sysdeps/x86_64/multiarch/memmove_chk.c.  */
   IFUNC_IMPL (i, name, __memmove_chk,
-	      IFUNC_IMPL_ADD (array, i, __memmove_chk,
+	      IFUNC_IMPL_ADD (array, i, __memmove_chk, 1,
+			      __memmove_chk_erms)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, __memmove_chk,
 			      CPU_FEATURE_USABLE (AVX512F),
 			      __memmove_chk_avx512_no_vzeroupper)
-	      IFUNC_IMPL_ADD (array, i, __memmove_chk,
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, __memmove_chk,
 			      CPU_FEATURE_USABLE (AVX512VL),
 			      __memmove_chk_avx512_unaligned)
-	      IFUNC_IMPL_ADD (array, i, __memmove_chk,
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, __memmove_chk,
 			      CPU_FEATURE_USABLE (AVX512VL),
 			      __memmove_chk_avx512_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, __memmove_chk,
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, __memmove_chk,
+			      CPU_FEATURE_USABLE (AVX512VL),
+			      __memmove_chk_evex_unaligned)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, __memmove_chk,
+			      CPU_FEATURE_USABLE (AVX512VL),
+			      __memmove_chk_evex_unaligned_erms)
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, __memmove_chk,
 			      CPU_FEATURE_USABLE (AVX),
 			      __memmove_chk_avx_unaligned)
-	      IFUNC_IMPL_ADD (array, i, __memmove_chk,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, __memmove_chk,
 			      CPU_FEATURE_USABLE (AVX),
 			      __memmove_chk_avx_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, __memmove_chk,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, __memmove_chk,
 			      (CPU_FEATURE_USABLE (AVX)
 			       && CPU_FEATURE_USABLE (RTM)),
 			      __memmove_chk_avx_unaligned_rtm)
-	      IFUNC_IMPL_ADD (array, i, __memmove_chk,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, __memmove_chk,
 			      (CPU_FEATURE_USABLE (AVX)
 			       && CPU_FEATURE_USABLE (RTM)),
 			      __memmove_chk_avx_unaligned_erms_rtm)
-	      IFUNC_IMPL_ADD (array, i, __memmove_chk,
-			      CPU_FEATURE_USABLE (AVX512VL),
-			      __memmove_chk_evex_unaligned)
-	      IFUNC_IMPL_ADD (array, i, __memmove_chk,
-			      CPU_FEATURE_USABLE (AVX512VL),
-			      __memmove_chk_evex_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, __memmove_chk,
+	      /* By V3 we assume fast aligned copy.  */
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, __memmove_chk,
 			      CPU_FEATURE_USABLE (SSSE3),
 			      __memmove_chk_ssse3)
-	      IFUNC_IMPL_ADD (array, i, __memmove_chk, 1,
+	      /* Can be lowered to V1 if a general V2 implementation is
+	         added (ssse3 is too optimized around aligned copy to be
+	         better as general purpose memmove).  */
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, __memmove_chk, 1,
 			      __memmove_chk_sse2_unaligned)
-	      IFUNC_IMPL_ADD (array, i, __memmove_chk, 1,
-			      __memmove_chk_sse2_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, __memmove_chk, 1,
-			      __memmove_chk_erms))
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, __memmove_chk, 1,
+			      __memmove_chk_sse2_unaligned_erms))
 #endif
 
   /* Support sysdeps/x86_64/multiarch/memmove.c.  */
   IFUNC_IMPL (i, name, memmove,
-	      IFUNC_IMPL_ADD (array, i, memmove,
+	      IFUNC_IMPL_ADD (array, i, memmove, 1,
+			      __memmove_erms)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, memmove,
+			      CPU_FEATURE_USABLE (AVX512F),
+			      __memmove_avx512_no_vzeroupper)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, memmove,
+			      CPU_FEATURE_USABLE (AVX512VL),
+			      __memmove_avx512_unaligned)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, memmove,
+			      CPU_FEATURE_USABLE (AVX512VL),
+			      __memmove_avx512_unaligned_erms)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, memmove,
+			      CPU_FEATURE_USABLE (AVX512VL),
+			      __memmove_evex_unaligned)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, memmove,
+			      CPU_FEATURE_USABLE (AVX512VL),
+			      __memmove_evex_unaligned_erms)
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, memmove,
 			      CPU_FEATURE_USABLE (AVX),
 			      __memmove_avx_unaligned)
-	      IFUNC_IMPL_ADD (array, i, memmove,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, memmove,
 			      CPU_FEATURE_USABLE (AVX),
 			      __memmove_avx_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, memmove,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, memmove,
 			      (CPU_FEATURE_USABLE (AVX)
 			       && CPU_FEATURE_USABLE (RTM)),
 			      __memmove_avx_unaligned_rtm)
-	      IFUNC_IMPL_ADD (array, i, memmove,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, memmove,
 			      (CPU_FEATURE_USABLE (AVX)
 			       && CPU_FEATURE_USABLE (RTM)),
 			      __memmove_avx_unaligned_erms_rtm)
-	      IFUNC_IMPL_ADD (array, i, memmove,
-			      CPU_FEATURE_USABLE (AVX512VL),
-			      __memmove_evex_unaligned)
-	      IFUNC_IMPL_ADD (array, i, memmove,
-			      CPU_FEATURE_USABLE (AVX512VL),
-			      __memmove_evex_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, memmove,
-			      CPU_FEATURE_USABLE (AVX512F),
-			      __memmove_avx512_no_vzeroupper)
-	      IFUNC_IMPL_ADD (array, i, memmove,
-			      CPU_FEATURE_USABLE (AVX512VL),
-			      __memmove_avx512_unaligned)
-	      IFUNC_IMPL_ADD (array, i, memmove,
-			      CPU_FEATURE_USABLE (AVX512VL),
-			      __memmove_avx512_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, memmove, CPU_FEATURE_USABLE (SSSE3),
+	      /* By V3 we assume fast aligned copy.  */
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, memmove,
+			      CPU_FEATURE_USABLE (SSSE3),
 			      __memmove_ssse3)
-	      IFUNC_IMPL_ADD (array, i, memmove, 1, __memmove_erms)
-	      IFUNC_IMPL_ADD (array, i, memmove, 1,
+	      /* Can be lowered to V1 if a general V2 implementation is
+	         added (ssse3 is too optimized around aligned copy to be
+	         better as general purpose memmove).  */
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, memmove, 1,
 			      __memmove_sse2_unaligned)
-	      IFUNC_IMPL_ADD (array, i, memmove, 1,
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, memmove, 1,
 			      __memmove_sse2_unaligned_erms))
 
   /* Support sysdeps/x86_64/multiarch/memrchr.c.  */
@@ -847,165 +857,186 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 #ifdef SHARED
   /* Support sysdeps/x86_64/multiarch/memcpy_chk.c.  */
   IFUNC_IMPL (i, name, __memcpy_chk,
-	      IFUNC_IMPL_ADD (array, i, __memcpy_chk,
+	      IFUNC_IMPL_ADD (array, i, __memcpy_chk, 1,
+			      __memcpy_chk_erms)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, __memcpy_chk,
 			      CPU_FEATURE_USABLE (AVX512F),
 			      __memcpy_chk_avx512_no_vzeroupper)
-	      IFUNC_IMPL_ADD (array, i, __memcpy_chk,
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, __memcpy_chk,
 			      CPU_FEATURE_USABLE (AVX512VL),
 			      __memcpy_chk_avx512_unaligned)
-	      IFUNC_IMPL_ADD (array, i, __memcpy_chk,
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, __memcpy_chk,
 			      CPU_FEATURE_USABLE (AVX512VL),
 			      __memcpy_chk_avx512_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, __memcpy_chk,
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, __memcpy_chk,
+			      CPU_FEATURE_USABLE (AVX512VL),
+			      __memcpy_chk_evex_unaligned)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, __memcpy_chk,
+			      CPU_FEATURE_USABLE (AVX512VL),
+			      __memcpy_chk_evex_unaligned_erms)
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, __memcpy_chk,
 			      CPU_FEATURE_USABLE (AVX),
 			      __memcpy_chk_avx_unaligned)
-	      IFUNC_IMPL_ADD (array, i, __memcpy_chk,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, __memcpy_chk,
 			      CPU_FEATURE_USABLE (AVX),
 			      __memcpy_chk_avx_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, __memcpy_chk,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, __memcpy_chk,
 			      (CPU_FEATURE_USABLE (AVX)
 			       && CPU_FEATURE_USABLE (RTM)),
 			      __memcpy_chk_avx_unaligned_rtm)
-	      IFUNC_IMPL_ADD (array, i, __memcpy_chk,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, __memcpy_chk,
 			      (CPU_FEATURE_USABLE (AVX)
 			       && CPU_FEATURE_USABLE (RTM)),
 			      __memcpy_chk_avx_unaligned_erms_rtm)
-	      IFUNC_IMPL_ADD (array, i, __memcpy_chk,
-			      CPU_FEATURE_USABLE (AVX512VL),
-			      __memcpy_chk_evex_unaligned)
-	      IFUNC_IMPL_ADD (array, i, __memcpy_chk,
-			      CPU_FEATURE_USABLE (AVX512VL),
-			      __memcpy_chk_evex_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, __memcpy_chk,
+	      /* By V3 we assume fast aligned copy.  */
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, __memcpy_chk,
 			      CPU_FEATURE_USABLE (SSSE3),
 			      __memcpy_chk_ssse3)
-	      IFUNC_IMPL_ADD (array, i, __memcpy_chk, 1,
+	      /* Can be lowered to V1 if a general V2 implementation is
+	         added (ssse3 is too optimized around aligned copy to be
+	         better as general purpose memmove).  */
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, __memcpy_chk, 1,
 			      __memcpy_chk_sse2_unaligned)
-	      IFUNC_IMPL_ADD (array, i, __memcpy_chk, 1,
-			      __memcpy_chk_sse2_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, __memcpy_chk, 1,
-			      __memcpy_chk_erms))
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, __memcpy_chk, 1,
+			      __memcpy_chk_sse2_unaligned_erms))
 #endif
 
   /* Support sysdeps/x86_64/multiarch/memcpy.c.  */
   IFUNC_IMPL (i, name, memcpy,
-	      IFUNC_IMPL_ADD (array, i, memcpy,
+	      IFUNC_IMPL_ADD (array, i, memcpy, 1,
+			      __memcpy_erms)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, memcpy,
+			      CPU_FEATURE_USABLE (AVX512F),
+			      __memcpy_avx512_no_vzeroupper)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, memcpy,
+			      CPU_FEATURE_USABLE (AVX512VL),
+			      __memcpy_avx512_unaligned)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, memcpy,
+			      CPU_FEATURE_USABLE (AVX512VL),
+			      __memcpy_avx512_unaligned_erms)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, memcpy,
+			      CPU_FEATURE_USABLE (AVX512VL),
+			      __memcpy_evex_unaligned)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, memcpy,
+			      CPU_FEATURE_USABLE (AVX512VL),
+			      __memcpy_evex_unaligned_erms)
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, memcpy,
 			      CPU_FEATURE_USABLE (AVX),
 			      __memcpy_avx_unaligned)
-	      IFUNC_IMPL_ADD (array, i, memcpy,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, memcpy,
 			      CPU_FEATURE_USABLE (AVX),
 			      __memcpy_avx_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, memcpy,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, memcpy,
 			      (CPU_FEATURE_USABLE (AVX)
 			       && CPU_FEATURE_USABLE (RTM)),
 			      __memcpy_avx_unaligned_rtm)
-	      IFUNC_IMPL_ADD (array, i, memcpy,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, memcpy,
 			      (CPU_FEATURE_USABLE (AVX)
 			       && CPU_FEATURE_USABLE (RTM)),
 			      __memcpy_avx_unaligned_erms_rtm)
-	      IFUNC_IMPL_ADD (array, i, memcpy,
-			      CPU_FEATURE_USABLE (AVX512VL),
-			      __memcpy_evex_unaligned)
-	      IFUNC_IMPL_ADD (array, i, memcpy,
-			      CPU_FEATURE_USABLE (AVX512VL),
-			      __memcpy_evex_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, memcpy, CPU_FEATURE_USABLE (SSSE3),
+	      /* By V3 we assume fast aligned copy.  */
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, memcpy,
+			      CPU_FEATURE_USABLE (SSSE3),
 			      __memcpy_ssse3)
-	      IFUNC_IMPL_ADD (array, i, memcpy,
-			      CPU_FEATURE_USABLE (AVX512F),
-			      __memcpy_avx512_no_vzeroupper)
-	      IFUNC_IMPL_ADD (array, i, memcpy,
-			      CPU_FEATURE_USABLE (AVX512VL),
-			      __memcpy_avx512_unaligned)
-	      IFUNC_IMPL_ADD (array, i, memcpy,
-			      CPU_FEATURE_USABLE (AVX512VL),
-			      __memcpy_avx512_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_sse2_unaligned)
-	      IFUNC_IMPL_ADD (array, i, memcpy, 1,
-			      __memcpy_sse2_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_erms))
+	      /* Can be lowered to V1 if a general V2 implementation is
+	         added (ssse3 is too optimized around aligned copy to be
+	         better as general purpose memmove).  */
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, memcpy, 1,
+			      __memcpy_sse2_unaligned)
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, memcpy, 1,
+			      __memcpy_sse2_unaligned_erms))
 
 #ifdef SHARED
   /* Support sysdeps/x86_64/multiarch/mempcpy_chk.c.  */
   IFUNC_IMPL (i, name, __mempcpy_chk,
-	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
+	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk, 1,
+			      __mempcpy_chk_erms)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, __mempcpy_chk,
 			      CPU_FEATURE_USABLE (AVX512F),
 			      __mempcpy_chk_avx512_no_vzeroupper)
-	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, __mempcpy_chk,
 			      CPU_FEATURE_USABLE (AVX512VL),
 			      __mempcpy_chk_avx512_unaligned)
-	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, __mempcpy_chk,
 			      CPU_FEATURE_USABLE (AVX512VL),
 			      __mempcpy_chk_avx512_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, __mempcpy_chk,
+			      CPU_FEATURE_USABLE (AVX512VL),
+			      __mempcpy_chk_evex_unaligned)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, __mempcpy_chk,
+			      CPU_FEATURE_USABLE (AVX512VL),
+			      __mempcpy_chk_evex_unaligned_erms)
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, __mempcpy_chk,
 			      CPU_FEATURE_USABLE (AVX),
 			      __mempcpy_chk_avx_unaligned)
-	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, __mempcpy_chk,
 			      CPU_FEATURE_USABLE (AVX),
 			      __mempcpy_chk_avx_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, __mempcpy_chk,
 			      (CPU_FEATURE_USABLE (AVX)
 			       && CPU_FEATURE_USABLE (RTM)),
 			      __mempcpy_chk_avx_unaligned_rtm)
-	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, __mempcpy_chk,
 			      (CPU_FEATURE_USABLE (AVX)
 			       && CPU_FEATURE_USABLE (RTM)),
 			      __mempcpy_chk_avx_unaligned_erms_rtm)
-	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
-			      CPU_FEATURE_USABLE (AVX512VL),
-			      __mempcpy_chk_evex_unaligned)
-	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
-			      CPU_FEATURE_USABLE (AVX512VL),
-			      __mempcpy_chk_evex_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
+	      /* By V3 we assume fast aligned copy.  */
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, __mempcpy_chk,
 			      CPU_FEATURE_USABLE (SSSE3),
 			      __mempcpy_chk_ssse3)
-	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk, 1,
+	      /* Can be lowered to V1 if a general V2 implementation is
+	         added (ssse3 is too optimized around aligned copy to be
+	         better as general purpose memmove).  */
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, __mempcpy_chk, 1,
 			      __mempcpy_chk_sse2_unaligned)
-	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk, 1,
-			      __mempcpy_chk_sse2_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk, 1,
-			      __mempcpy_chk_erms))
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, __mempcpy_chk, 1,
+			      __mempcpy_chk_sse2_unaligned_erms))
 #endif
 
   /* Support sysdeps/x86_64/multiarch/mempcpy.c.  */
   IFUNC_IMPL (i, name, mempcpy,
-	      IFUNC_IMPL_ADD (array, i, mempcpy,
+	      IFUNC_IMPL_ADD (array, i, mempcpy, 1,
+			      __mempcpy_erms)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, mempcpy,
 			      CPU_FEATURE_USABLE (AVX512F),
 			      __mempcpy_avx512_no_vzeroupper)
-	      IFUNC_IMPL_ADD (array, i, mempcpy,
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, mempcpy,
 			      CPU_FEATURE_USABLE (AVX512VL),
 			      __mempcpy_avx512_unaligned)
-	      IFUNC_IMPL_ADD (array, i, mempcpy,
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, mempcpy,
 			      CPU_FEATURE_USABLE (AVX512VL),
 			      __mempcpy_avx512_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, mempcpy,
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, mempcpy,
+			      CPU_FEATURE_USABLE (AVX512VL),
+			      __mempcpy_evex_unaligned)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, mempcpy,
+			      CPU_FEATURE_USABLE (AVX512VL),
+			      __mempcpy_evex_unaligned_erms)
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, mempcpy,
 			      CPU_FEATURE_USABLE (AVX),
 			      __mempcpy_avx_unaligned)
-	      IFUNC_IMPL_ADD (array, i, mempcpy,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, mempcpy,
 			      CPU_FEATURE_USABLE (AVX),
 			      __mempcpy_avx_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, mempcpy,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, mempcpy,
 			      (CPU_FEATURE_USABLE (AVX)
 			       && CPU_FEATURE_USABLE (RTM)),
 			      __mempcpy_avx_unaligned_rtm)
-	      IFUNC_IMPL_ADD (array, i, mempcpy,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, mempcpy,
 			      (CPU_FEATURE_USABLE (AVX)
 			       && CPU_FEATURE_USABLE (RTM)),
 			      __mempcpy_avx_unaligned_erms_rtm)
-	      IFUNC_IMPL_ADD (array, i, mempcpy,
-			      CPU_FEATURE_USABLE (AVX512VL),
-			      __mempcpy_evex_unaligned)
-	      IFUNC_IMPL_ADD (array, i, mempcpy,
-			      CPU_FEATURE_USABLE (AVX512VL),
-			      __mempcpy_evex_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, mempcpy, CPU_FEATURE_USABLE (SSSE3),
+	      /* By V3 we assume fast aligned copy.  */
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, mempcpy,
+			      CPU_FEATURE_USABLE (SSSE3),
 			      __mempcpy_ssse3)
-	      IFUNC_IMPL_ADD (array, i, mempcpy, 1,
+	      /* Can be lowered to V1 if a general V2 implementation is
+	         added (ssse3 is too optimized around aligned copy to be
+	         better as general purpose memmove).  */
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, mempcpy, 1,
 			      __mempcpy_sse2_unaligned)
-	      IFUNC_IMPL_ADD (array, i, mempcpy, 1,
-			      __mempcpy_sse2_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, mempcpy, 1, __mempcpy_erms))
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, mempcpy, 1,
+			      __mempcpy_sse2_unaligned_erms))
 
   /* Support sysdeps/x86_64/multiarch/strncmp.c.  */
   IFUNC_IMPL (i, name, strncmp,
diff --git a/sysdeps/x86_64/multiarch/ifunc-memmove.h b/sysdeps/x86_64/multiarch/ifunc-memmove.h
index fb01fbb301..1643d32887 100644
--- a/sysdeps/x86_64/multiarch/ifunc-memmove.h
+++ b/sysdeps/x86_64/multiarch/ifunc-memmove.h
@@ -20,11 +20,19 @@
 #include <init-arch.h>
 
 extern __typeof (REDIRECT_NAME) OPTIMIZE (erms) attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned)
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned)
 	attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned_erms)
+extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned_erms)
 	attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (ssse3) attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_no_vzeroupper)
+	attribute_hidden;
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned)
+	attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned_erms)
+	attribute_hidden;
+
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx_unaligned) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx_unaligned_erms)
 	attribute_hidden;
@@ -32,30 +40,27 @@ extern __typeof (REDIRECT_NAME) OPTIMIZE (avx_unaligned_rtm)
 	attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx_unaligned_erms_rtm)
 	attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned)
-	attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned_erms)
-	attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned)
-	attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned_erms)
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (ssse3) attribute_hidden;
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned)
 	attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_no_vzeroupper)
+extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned_erms)
 	attribute_hidden;
 
 static inline void *
 IFUNC_SELECTOR (void)
 {
-  const struct cpu_features* cpu_features = __get_cpu_features ();
+  const struct cpu_features *cpu_features = __get_cpu_features ();
 
   if (CPU_FEATURES_ARCH_P (cpu_features, Prefer_ERMS)
       || CPU_FEATURES_ARCH_P (cpu_features, Prefer_FSRM))
     return OPTIMIZE (erms);
 
-  if (CPU_FEATURE_USABLE_P (cpu_features, AVX512F)
+  if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512F)
       && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
     {
-      if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL))
+      if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512VL))
 	{
 	  if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
 	    return OPTIMIZE (avx512_unaligned_erms);
@@ -66,9 +71,10 @@ IFUNC_SELECTOR (void)
       return OPTIMIZE (avx512_no_vzeroupper);
     }
 
-  if (CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
+  if (X86_ISA_CPU_FEATURES_ARCH_P (cpu_features,
+				   AVX_Fast_Unaligned_Load, ))
     {
-      if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL))
+      if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512VL))
 	{
 	  if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
 	    return OPTIMIZE (evex_unaligned_erms);
@@ -84,7 +90,8 @@ IFUNC_SELECTOR (void)
 	  return OPTIMIZE (avx_unaligned_rtm);
 	}
 
-      if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
+      if (X86_ISA_CPU_FEATURES_ARCH_P (cpu_features,
+				       Prefer_No_VZEROUPPER, !))
 	{
 	  if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
 	    return OPTIMIZE (avx_unaligned_erms);
@@ -93,7 +100,11 @@ IFUNC_SELECTOR (void)
 	}
     }
 
-  if (CPU_FEATURE_USABLE_P (cpu_features, SSSE3)
+  if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, SSSE3)
+      /* Leave this as runtime check.  The SSSE3 is optimized almost
+         exclusively for avoiding unaligned memory access during the
+         copy and by and large is not better than the sse2
+         implementation as a general purpose memmove.  */
       && !CPU_FEATURES_ARCH_P (cpu_features, Fast_Unaligned_Copy))
     {
       return OPTIMIZE (ssse3);
diff --git a/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms.S
index 975ae6c051..a14b155667 100644
--- a/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms.S
@@ -1,12 +1,23 @@
-#if IS_IN (libc)
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (3)
+
 # define VEC_SIZE	32
 # define VEC(i)		ymm##i
 # define VMOVNT		vmovntdq
 # define VMOVU		vmovdqu
 # define VMOVA		vmovdqa
 # define MOV_SIZE	4
+
 # define SECTION(p)	p##.avx
-# define MEMMOVE_SYMBOL(p,s)	p##_avx_##s
+
+# ifndef MEMMOVE_SYMBOL
+#  define MEMMOVE_SYMBOL(p,s)	p##_avx_##s
+# endif
 
 # include "memmove-vec-unaligned-erms.S"
+
+# if MINIMUM_X86_ISA_LEVEL == 3
+#  include "memmove-shlib-compat.h"
+# endif
 #endif
diff --git a/sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S b/sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S
index 42d15a142a..9c090d368b 100644
--- a/sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S
+++ b/sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S
@@ -17,8 +17,9 @@
    <https://www.gnu.org/licenses/>.  */
 
 #include <sysdep.h>
+#include <isa-level.h>
 
-#if IS_IN (libc)
+#if ISA_SHOULD_BUILD (4)
 
 # include "asm-syntax.h"
diff --git a/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S
index 0fa7126830..8d1568a7ba 100644
--- a/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S
@@ -1,4 +1,7 @@
-#if IS_IN (libc)
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (4)
+
 # define VEC_SIZE	64
 # define XMM0		xmm16
 # define XMM1		xmm17
@@ -26,8 +29,12 @@
 # define VMOVA		vmovdqa64
 # define VZEROUPPER
 # define MOV_SIZE	6
+
 # define SECTION(p)	p##.evex512
-# define MEMMOVE_SYMBOL(p,s)	p##_avx512_##s
+
+# ifndef MEMMOVE_SYMBOL
+#  define MEMMOVE_SYMBOL(p,s)	p##_avx512_##s
+# endif
 
 # include "memmove-vec-unaligned-erms.S"
 #endif
diff --git a/sysdeps/x86_64/multiarch/memmove-evex-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-evex-unaligned-erms.S
index 88715441fe..2373017358 100644
--- a/sysdeps/x86_64/multiarch/memmove-evex-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-evex-unaligned-erms.S
@@ -1,4 +1,7 @@
-#if IS_IN (libc)
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (4)
+
 # define VEC_SIZE	32
 # define XMM0		xmm16
 # define XMM1		xmm17
@@ -26,8 +29,17 @@
 # define VMOVA		vmovdqa64
 # define VZEROUPPER
 # define MOV_SIZE	6
+
 # define SECTION(p)	p##.evex
-# define MEMMOVE_SYMBOL(p,s)	p##_evex_##s
+
+# ifndef MEMMOVE_SYMBOL
+#  define MEMMOVE_SYMBOL(p,s)	p##_evex_##s
+# endif
 
 # include "memmove-vec-unaligned-erms.S"
+
+
+# if MINIMUM_X86_ISA_LEVEL == 4
+#  include "memmove-shlib-compat.h"
+# endif
 #endif
diff --git a/sysdeps/x86_64/multiarch/memmove-shlib-compat.h b/sysdeps/x86_64/multiarch/memmove-shlib-compat.h
new file mode 100644
index 0000000000..c0793d6eef
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/memmove-shlib-compat.h
@@ -0,0 +1,26 @@
+/* Copyright (C) 2016-2022 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#if defined SHARED && IS_IN (libc)
+# include <shlib-compat.h>
+# if SHLIB_COMPAT (libc, GLIBC_2_2_5, GLIBC_2_14)
+/* Use __memmove_{isa_level}_unaligned to support overlapping
+   addresses.  */
+compat_symbol (libc, MEMMOVE_SYMBOL (__memmove, unaligned), memcpy,
+	       GLIBC_2_2_5);
+# endif
+#endif
diff --git a/sysdeps/x86_64/multiarch/memmove-sse2-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-sse2-unaligned-erms.S
index 09e7c1d6cd..422a079902 100644
--- a/sysdeps/x86_64/multiarch/memmove-sse2-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-sse2-unaligned-erms.S
@@ -16,18 +16,32 @@
    License along with the GNU C Library; if not, see
    <https://www.gnu.org/licenses/>.  */
 
-#if IS_IN (libc)
-# define MEMMOVE_SYMBOL(p,s)	p##_sse2_##s
-#else
-weak_alias (__mempcpy, mempcpy)
-#endif
+#include <isa-level.h>
+
+/* MINIMUM_X86_ISA_LEVEL <= 2 because there is no V2 implementation
+   so we need this to build for ISA V2 builds.  */
+#if ISA_SHOULD_BUILD (2)
+
+# include <sysdep.h>
+
+# define VEC_SIZE	16
+# define VEC(i)		xmm##i
+# define PREFETCHNT	prefetchnta
+# define VMOVNT		movntdq
+/* Use movups and movaps for smaller code sizes.  */
+# define VMOVU		movups
+# define VMOVA		movaps
+# define MOV_SIZE	3
+
+# define SECTION(p)	p
+
+# ifndef MEMMOVE_SYMBOL
+#  define MEMMOVE_SYMBOL(p,s)	p##_sse2_##s
+# endif
 
-#include <sysdeps/x86_64/memmove.S>
+# include "multiarch/memmove-vec-unaligned-erms.S"
 
-#if defined SHARED && IS_IN (libc)
-# include <shlib-compat.h>
-# if SHLIB_COMPAT (libc, GLIBC_2_2_5, GLIBC_2_14)
-/* Use __memmove_sse2_unaligned to support overlapping addresses.  */
-compat_symbol (libc, __memmove_sse2_unaligned, memcpy, GLIBC_2_2_5);
+# if MINIMUM_X86_ISA_LEVEL <= 2
+#  include "memmove-shlib-compat.h"
 # endif
 #endif
diff --git a/sysdeps/x86_64/multiarch/memmove-ssse3.S b/sysdeps/x86_64/multiarch/memmove-ssse3.S
index 310ff62b86..60b2ba84e7 100644
--- a/sysdeps/x86_64/multiarch/memmove-ssse3.S
+++ b/sysdeps/x86_64/multiarch/memmove-ssse3.S
@@ -1,13 +1,17 @@
-#include <sysdep.h>
-
-#ifndef MEMMOVE
-# define MEMMOVE	__memmove_ssse3
-# define MEMMOVE_CHK	__memmove_chk_ssse3
-# define MEMCPY	__memcpy_ssse3
-# define MEMCPY_CHK	__memcpy_chk_ssse3
-# define MEMPCPY	__mempcpy_ssse3
-# define MEMPCPY_CHK	__mempcpy_chk_ssse3
-#endif
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (2)
+
+# include <sysdep.h>
+
+# ifndef MEMMOVE
+#  define MEMMOVE	__memmove_ssse3
+#  define MEMMOVE_CHK	__memmove_chk_ssse3
+#  define MEMCPY	__memcpy_ssse3
+#  define MEMCPY_CHK	__memcpy_chk_ssse3
+#  define MEMPCPY	__mempcpy_ssse3
+#  define MEMPCPY_CHK	__mempcpy_chk_ssse3
+# endif
 
 	.section .text.ssse3, "ax", @progbits
 ENTRY(MEMPCPY_CHK)
@@ -27,10 +31,10 @@ ENTRY(MEMMOVE_CHK)
 END(MEMMOVE_CHK)
 
 ENTRY_P2ALIGN(MEMMOVE, 6)
-#ifdef __ILP32__
+# ifdef __ILP32__
 	/* Clear the upper 32 bits.  */
 	movl	%edx, %edx
-#endif
+# endif
 	movq	%rdi, %rax
 L(start):
 	cmpq	$16, %rdx
@@ -124,11 +128,11 @@ L(more_2x_vec):
 	   loop.  */
 	movups	%xmm0, (%rdi)
 
-#ifdef SHARED_CACHE_SIZE_HALF
+# ifdef SHARED_CACHE_SIZE_HALF
 	cmp	$SHARED_CACHE_SIZE_HALF, %RDX_LP
-#else
+# else
 	cmp	__x86_shared_cache_size_half(%rip), %rdx
-#endif
+# endif
 	ja	L(large_memcpy)
 
 	leaq	-64(%rdi, %rdx), %r8
@@ -206,7 +210,7 @@ L(end_loop_fwd):
 
 /* Extactly 64 bytes if `jmp L(end_loop_fwd)` is long encoding.
    60 bytes otherwise.  */
-#define ALIGNED_LOOP_FWD(align_by);	\
+# define ALIGNED_LOOP_FWD(align_by);	\
 	.p2align 6;	\
 L(loop_fwd_ ## align_by):	\
 	movaps	16(%rsi), %xmm0;	\
@@ -275,7 +279,7 @@ L(end_large_loop_fwd):
 
 /* Size > 64 bytes and <= 96 bytes. 32-byte align between ensure
    96-byte spacing between each.  */
-#define ALIGNED_LARGE_LOOP_FWD(align_by);	\
+# define ALIGNED_LARGE_LOOP_FWD(align_by);	\
 	.p2align 5;	\
 L(large_loop_fwd_ ## align_by):	\
 	movaps	16(%rsi), %xmm0;	\
@@ -343,7 +347,7 @@ L(end_loop_bkwd):
 
 /* Extactly 64 bytes if `jmp L(end_loop_bkwd)` is long encoding.
    60 bytes otherwise.  */
-#define ALIGNED_LOOP_BKWD(align_by);	\
+# define ALIGNED_LOOP_BKWD(align_by);	\
 	.p2align 6;	\
 L(loop_bkwd_ ## align_by):	\
 	movaps	32(%rsi), %xmm1;	\
@@ -382,3 +386,4 @@ END(MEMMOVE)
 
 strong_alias (MEMMOVE, MEMCPY)
 strong_alias (MEMMOVE_CHK, MEMCPY_CHK)
+#endif
diff --git a/sysdeps/x86_64/multiarch/rtld-memmove.S b/sysdeps/x86_64/multiarch/rtld-memmove.S
new file mode 100644
index 0000000000..1f3ad6433b
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/rtld-memmove.S
@@ -0,0 +1,18 @@
+/* Copyright (C) 2022 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "../memmove.S"
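[Editor's note: for reference, a rough C model of the `rep movsb`
based __memmove_erms that patch 1/2 moves into its own file.  This is
an illustrative sketch, not glibc code; the function name
memmove_erms_model is invented here.

    #include <stddef.h>

    /* Model of __memmove_erms: plain forward `rep movsb` unless the
       destination starts inside the source region, in which case the
       copy runs backward with the direction flag set (the
       std/rep movsb/cld path in the assembly).  */
    static void *
    memmove_erms_model (void *dst, const void *src, size_t len)
    {
      unsigned char *d = dst;
      const unsigned char *s = src;

      /* Skip zero length; source == destination is a no-op.  */
      if (len == 0 || dst == src)
        return dst;

      if (d < s || d >= s + len)
        /* A forward copy cannot clobber unread source bytes.  */
        __asm__ volatile ("rep movsb"
                          : "+D" (d), "+S" (s), "+c" (len) :: "memory");
      else
        {
          /* dst lies inside [src, src + len): copy from the end.  */
          d += len - 1;
          s += len - 1;
          __asm__ volatile ("std\n\trep movsb\n\tcld"
                            : "+D" (d), "+S" (s), "+c" (len) :: "memory");
        }
      return dst;
    }

As the new file's own comment notes, the erms variants exist mainly to
measure REP MOVSB performance; the ifunc selector only returns them
when the Prefer_ERMS or Prefer_FSRM flag is set.]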