From patchwork Wed Oct 27 02:43:22 2021
X-Patchwork-Submitter: Noah Goldstein
X-Patchwork-Id: 46683
To: libc-alpha@sourceware.org
Subject: [PATCH v1 5/6] x86_64: Add avx2 optimized __memcmpeq in memcmpeq-avx2.S
Date: Tue, 26 Oct 2021 21:43:22 -0500
Message-Id: <20211027024323.1199441-5-goldstein.w.n@gmail.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20211027024323.1199441-1-goldstein.w.n@gmail.com>
References: <20211027024323.1199441-1-goldstein.w.n@gmail.com>
From: Noah Goldstein
Reply-To: Noah Goldstein

No bug. This commit adds a new optimized __memcmpeq implementation for
avx2. The primary optimizations are:

1) skipping the logic to find the difference of the first mismatched
   byte.
2) not updating src/dst addresses, as the non-equals logic does not
   need to be reused by different areas.

Reviewed-by: H.J. Lu
---
 sysdeps/x86_64/multiarch/ifunc-impl-list.c   |   2 -
 sysdeps/x86_64/multiarch/ifunc-memcmpeq.h    |   2 +-
 sysdeps/x86_64/multiarch/memcmpeq-avx2-rtm.S |   4 +-
 sysdeps/x86_64/multiarch/memcmpeq-avx2.S     | 309 ++++++++++++++++++-
 4 files changed, 308 insertions(+), 9 deletions(-)

diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index f7f3806d1d..535450f52c 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -42,13 +42,11 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   IFUNC_IMPL (i, name, __memcmpeq,
	      IFUNC_IMPL_ADD (array, i, __memcmpeq,
			      (CPU_FEATURE_USABLE (AVX2)
-			       && CPU_FEATURE_USABLE (MOVBE)
			       && CPU_FEATURE_USABLE (BMI2)),
			      __memcmpeq_avx2)
	      IFUNC_IMPL_ADD (array, i, __memcmpeq,
			      (CPU_FEATURE_USABLE (AVX2)
			       && CPU_FEATURE_USABLE (BMI2)
-			       && CPU_FEATURE_USABLE (MOVBE)
			       && CPU_FEATURE_USABLE (RTM)),
			      __memcmpeq_avx2_rtm)
	      IFUNC_IMPL_ADD (array, i, __memcmpeq,
diff --git a/sysdeps/x86_64/multiarch/ifunc-memcmpeq.h b/sysdeps/x86_64/multiarch/ifunc-memcmpeq.h
index 3319a9568a..e596c5048b 100644
--- a/sysdeps/x86_64/multiarch/ifunc-memcmpeq.h
+++ b/sysdeps/x86_64/multiarch/ifunc-memcmpeq.h
@@ -31,10 +31,10 @@ IFUNC_SELECTOR (void)
   if (CPU_FEATURE_USABLE_P (cpu_features, AVX2)
       && CPU_FEATURE_USABLE_P (cpu_features, BMI2)
-      && CPU_FEATURE_USABLE_P (cpu_features, MOVBE)
       && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
     {
       if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
+	  && CPU_FEATURE_USABLE_P (cpu_features, MOVBE)
	  && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
	return OPTIMIZE1 (evex);
diff --git a/sysdeps/x86_64/multiarch/memcmpeq-avx2-rtm.S b/sysdeps/x86_64/multiarch/memcmpeq-avx2-rtm.S
index 24b6a0c9ff..3264a4a76c 100644
--- a/sysdeps/x86_64/multiarch/memcmpeq-avx2-rtm.S
+++ b/sysdeps/x86_64/multiarch/memcmpeq-avx2-rtm.S
@@ -1,5 +1,5 @@
-#ifndef MEMCMP
-# define MEMCMP __memcmpeq_avx2_rtm
+#ifndef MEMCMPEQ
+# define MEMCMPEQ __memcmpeq_avx2_rtm
 #endif
 
 #define ZERO_UPPER_VEC_REGISTERS_RETURN \
diff --git a/sysdeps/x86_64/multiarch/memcmpeq-avx2.S b/sysdeps/x86_64/multiarch/memcmpeq-avx2.S
index 0181ea0d8d..0bf59fb8fa 100644
--- a/sysdeps/x86_64/multiarch/memcmpeq-avx2.S
+++ b/sysdeps/x86_64/multiarch/memcmpeq-avx2.S
@@ -16,8 +16,309 @@
    License along with the GNU C Library; if not, see
    <https://www.gnu.org/licenses/>.  */
 
-#ifndef MEMCMP
-# define MEMCMP __memcmpeq_avx2
-#endif
+#if IS_IN (libc)
+
+/* __memcmpeq is implemented as:
+   1. Use ymm vector compares when possible.  The only case where
+      vector compares are not possible is when size < VEC_SIZE and
+      loading from either s1 or s2 would cause a page cross.
+   2. Use xmm vector compare when size >= 8 bytes.
+   3. Optimistically compare up to the first 4 * VEC_SIZE one VEC at a
+      time to check for early mismatches.  Only do this if it is
+      guaranteed the work is not wasted.
+   4. If size is 8 * VEC_SIZE or less, unroll the loop.
+   5. Compare 4 * VEC_SIZE at a time with the aligned first memory
+      area.
+   6. Use 2 vector compares when size is 2 * VEC_SIZE or less.
+   7. Use 4 vector compares when size is 4 * VEC_SIZE or less.
+   8. Use 8 vector compares when size is 8 * VEC_SIZE or less.  */
+
+# include <sysdep.h>
+
+# ifndef MEMCMPEQ
+#  define MEMCMPEQ __memcmpeq_avx2
+# endif
+
+# define VPCMPEQ vpcmpeqb
+
+# ifndef VZEROUPPER
+#  define VZEROUPPER vzeroupper
+# endif
+
+# ifndef SECTION
+#  define SECTION(p) p##.avx
+# endif
+
+# define VEC_SIZE 32
+# define PAGE_SIZE 4096
+
+        .section SECTION(.text), "ax", @progbits
+ENTRY_P2ALIGN (MEMCMPEQ, 6)
+# ifdef __ILP32__
+        /* Clear the upper 32 bits.  */
+        movl    %edx, %edx
+# endif
+        cmp     $VEC_SIZE, %RDX_LP
+        jb      L(less_vec)
+
+        /* From VEC to 2 * VEC.  No branch when size == VEC_SIZE.  */
+        vmovdqu (%rsi), %ymm1
+        VPCMPEQ (%rdi), %ymm1, %ymm1
+        vpmovmskb %ymm1, %eax
+        incl    %eax
+        jnz     L(return_neq0)
+        cmpq    $(VEC_SIZE * 2), %rdx
+        jbe     L(last_1x_vec)
+
+        /* Check second VEC no matter what.  */
+        vmovdqu VEC_SIZE(%rsi), %ymm2
+        VPCMPEQ VEC_SIZE(%rdi), %ymm2, %ymm2
+        vpmovmskb %ymm2, %eax
+        /* If all 4 VEC were equal, eax will be all 1s, so incl will
+           overflow and set the zero flag.  */
+        incl    %eax
+        jnz     L(return_neq0)
+
+        /* Less than 4 * VEC.  */
+        cmpq    $(VEC_SIZE * 4), %rdx
+        jbe     L(last_2x_vec)
+
+        /* Check third and fourth VEC no matter what.  */
+        vmovdqu (VEC_SIZE * 2)(%rsi), %ymm3
+        VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm3, %ymm3
+        vpmovmskb %ymm3, %eax
+        incl    %eax
+        jnz     L(return_neq0)
+
+        vmovdqu (VEC_SIZE * 3)(%rsi), %ymm4
+        VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm4, %ymm4
+        vpmovmskb %ymm4, %eax
+        incl    %eax
+        jnz     L(return_neq0)
+
+        /* Go to 4x VEC loop.  */
+        cmpq    $(VEC_SIZE * 8), %rdx
+        ja      L(more_8x_vec)
+
+        /* Handle remainder of size = 4 * VEC + 1 to 8 * VEC without any
+           branches.  */
+
+        /* Adjust rsi and rdi to avoid indexed address mode.  This ends
+           up saving 16 bytes of code and prevents unlamination and
+           bottlenecks in the AGU.  */
+        addq    %rdx, %rsi
+        vmovdqu -(VEC_SIZE * 4)(%rsi), %ymm1
+        vmovdqu -(VEC_SIZE * 3)(%rsi), %ymm2
+        addq    %rdx, %rdi
+
+        VPCMPEQ -(VEC_SIZE * 4)(%rdi), %ymm1, %ymm1
+        VPCMPEQ -(VEC_SIZE * 3)(%rdi), %ymm2, %ymm2
+
+        vmovdqu -(VEC_SIZE * 2)(%rsi), %ymm3
+        VPCMPEQ -(VEC_SIZE * 2)(%rdi), %ymm3, %ymm3
+        vmovdqu -VEC_SIZE(%rsi), %ymm4
+        VPCMPEQ -VEC_SIZE(%rdi), %ymm4, %ymm4
+
+        /* Reduce VEC0 - VEC4.  */
+        vpand   %ymm1, %ymm2, %ymm2
+        vpand   %ymm3, %ymm4, %ymm4
+        vpand   %ymm2, %ymm4, %ymm4
+        vpmovmskb %ymm4, %eax
+        incl    %eax
+L(return_neq0):
+L(return_vzeroupper):
+        ZERO_UPPER_VEC_REGISTERS_RETURN
 
-#include "memcmp-avx2-movbe.S"
+        /* NB: p2align 5 here will ensure the L(loop_4x_vec) is also 32
+           byte aligned.  */
+        .p2align 5
+L(less_vec):
+        /* Check if one or less char.  This is necessary for size = 0 but
+           is also faster for size = 1.  */
+        cmpl    $1, %edx
+        jbe     L(one_or_less)
+
+        /* Check if loading one VEC from either s1 or s2 could cause a
+           page cross.  This can have false positives but is by far the
+           fastest method.  */
+        movl    %edi, %eax
+        orl     %esi, %eax
+        andl    $(PAGE_SIZE - 1), %eax
+        cmpl    $(PAGE_SIZE - VEC_SIZE), %eax
+        jg      L(page_cross_less_vec)
+
+        /* No page cross possible.  */
+        vmovdqu (%rsi), %ymm2
+        VPCMPEQ (%rdi), %ymm2, %ymm2
+        vpmovmskb %ymm2, %eax
+        incl    %eax
+        /* Result will be zero if s1 and s2 match.  Otherwise the first
+           set bit will be the first mismatch.  */
+        bzhil   %edx, %eax, %eax
+        VZEROUPPER_RETURN
+
+        /* Relatively cold but placed close to L(less_vec) for 2 byte
+           jump encoding.  */
+        .p2align 4
+L(one_or_less):
+        jb      L(zero)
+        movzbl  (%rsi), %ecx
+        movzbl  (%rdi), %eax
+        subl    %ecx, %eax
+        /* No ymm register was touched.  */
+        ret
+        /* Within the same 16 byte block is L(one_or_less).  */
+L(zero):
+        xorl    %eax, %eax
+        ret
+
+        .p2align 4
+L(last_1x_vec):
+        vmovdqu -(VEC_SIZE * 1)(%rsi, %rdx), %ymm1
+        VPCMPEQ -(VEC_SIZE * 1)(%rdi, %rdx), %ymm1, %ymm1
+        vpmovmskb %ymm1, %eax
+        incl    %eax
+        VZEROUPPER_RETURN
+
+        .p2align 4
+L(last_2x_vec):
+        vmovdqu -(VEC_SIZE * 2)(%rsi, %rdx), %ymm1
+        VPCMPEQ -(VEC_SIZE * 2)(%rdi, %rdx), %ymm1, %ymm1
+        vmovdqu -(VEC_SIZE * 1)(%rsi, %rdx), %ymm2
+        VPCMPEQ -(VEC_SIZE * 1)(%rdi, %rdx), %ymm2, %ymm2
+        vpand   %ymm1, %ymm2, %ymm2
+        vpmovmskb %ymm2, %eax
+        incl    %eax
+        VZEROUPPER_RETURN
+
+        .p2align 4
+L(more_8x_vec):
+        /* Set end of s1 in rdx.  */
+        leaq    -(VEC_SIZE * 4)(%rdi, %rdx), %rdx
+        /* rsi stores s2 - s1.  This allows the loop to update only one
+           pointer.  */
+        subq    %rdi, %rsi
+        /* Align s1 pointer.  */
+        andq    $-VEC_SIZE, %rdi
+        /* Adjust because the first 4x VEC were checked already.  */
+        subq    $-(VEC_SIZE * 4), %rdi
+        .p2align 4
+L(loop_4x_vec):
+        /* rsi has s2 - s1 so get correct address by adding s1 (in rdi).  */
+        vmovdqu (%rsi, %rdi), %ymm1
+        VPCMPEQ (%rdi), %ymm1, %ymm1
+
+        vmovdqu VEC_SIZE(%rsi, %rdi), %ymm2
+        VPCMPEQ VEC_SIZE(%rdi), %ymm2, %ymm2
+
+        vmovdqu (VEC_SIZE * 2)(%rsi, %rdi), %ymm3
+        VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm3, %ymm3
+
+        vmovdqu (VEC_SIZE * 3)(%rsi, %rdi), %ymm4
+        VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm4, %ymm4
+
+        vpand   %ymm1, %ymm2, %ymm2
+        vpand   %ymm3, %ymm4, %ymm4
+        vpand   %ymm2, %ymm4, %ymm4
+        vpmovmskb %ymm4, %eax
+        incl    %eax
+        jnz     L(return_neq1)
+        subq    $-(VEC_SIZE * 4), %rdi
+        /* Check if s1 pointer is at end.  */
+        cmpq    %rdx, %rdi
+        jb      L(loop_4x_vec)
+
+        vmovdqu (VEC_SIZE * 3)(%rsi, %rdx), %ymm4
+        VPCMPEQ (VEC_SIZE * 3)(%rdx), %ymm4, %ymm4
+        subq    %rdx, %rdi
+        /* rdi has 4 * VEC_SIZE - remaining length.  */
+        cmpl    $(VEC_SIZE * 3), %edi
+        jae     L(8x_last_1x_vec)
+        /* Load regardless of branch.  */
+        vmovdqu (VEC_SIZE * 2)(%rsi, %rdx), %ymm3
+        VPCMPEQ (VEC_SIZE * 2)(%rdx), %ymm3, %ymm3
+        cmpl    $(VEC_SIZE * 2), %edi
+        jae     L(8x_last_2x_vec)
+        /* Check last 4 VEC.  */
+        vmovdqu VEC_SIZE(%rsi, %rdx), %ymm1
+        VPCMPEQ VEC_SIZE(%rdx), %ymm1, %ymm1
+
+        vmovdqu (%rsi, %rdx), %ymm2
+        VPCMPEQ (%rdx), %ymm2, %ymm2
+
+        vpand   %ymm3, %ymm4, %ymm4
+        vpand   %ymm1, %ymm2, %ymm3
+L(8x_last_2x_vec):
+        vpand   %ymm3, %ymm4, %ymm4
+L(8x_last_1x_vec):
+        vpmovmskb %ymm4, %eax
+        /* Restore s1 pointer to rdi.  */
+        incl    %eax
+L(return_neq1):
+        VZEROUPPER_RETURN
+
+        /* Relatively cold case as page crosses are unexpected.  */
+        .p2align 4
+L(page_cross_less_vec):
+        cmpl    $16, %edx
+        jae     L(between_16_31)
+        cmpl    $8, %edx
+        ja      L(between_9_15)
+        cmpl    $4, %edx
+        jb      L(between_2_3)
+        /* From 4 to 8 bytes.  No branch when size == 4.  */
+        movl    (%rdi), %eax
+        subl    (%rsi), %eax
+        movl    -4(%rdi, %rdx), %ecx
+        movl    -4(%rsi, %rdx), %edi
+        subl    %edi, %ecx
+        orl     %ecx, %eax
+        ret
+
+        .p2align 4,, 8
+L(between_16_31):
+        /* From 16 to 31 bytes.  No branch when size == 16.  */
+
+        /* Safe to use xmm[0, 15] as no vzeroupper is needed so RTM
+           safe.  */
+        vmovdqu (%rsi), %xmm1
+        vpcmpeqb (%rdi), %xmm1, %xmm1
+        vmovdqu -16(%rsi, %rdx), %xmm2
+        vpcmpeqb -16(%rdi, %rdx), %xmm2, %xmm2
+        vpand   %xmm1, %xmm2, %xmm2
+        vpmovmskb %xmm2, %eax
+        notw    %ax
+        /* No ymm register was touched.  */
+        ret
+
+        .p2align 4,, 8
+L(between_9_15):
+        /* From 9 to 15 bytes.  */
+        movq    (%rdi), %rax
+        subq    (%rsi), %rax
+        movq    -8(%rdi, %rdx), %rcx
+        movq    -8(%rsi, %rdx), %rdi
+        subq    %rdi, %rcx
+        orq     %rcx, %rax
+        /* edx is guaranteed to be a non-zero int.  */
+        cmovnz  %edx, %eax
+        ret
+
+        /* Don't align.  This is cold and aligning here will cause code
+           to spill into the next cache line.  */
+L(between_2_3):
+        /* From 2 to 3 bytes.  No branch when size == 2.  */
+        movzwl  (%rdi), %eax
+        movzwl  (%rsi), %ecx
+        subl    %ecx, %eax
+        movzbl  -1(%rdi, %rdx), %ecx
+        /* All machines that support evex will insert a "merging uop"
+           avoiding any serious partial register stalls.  */
+        subb    -1(%rsi, %rdx), %cl
+        orl     %ecx, %eax
+        /* No ymm register was touched.  */
+        ret
+
+        /* 2 Bytes from next cache line.  */
+END (MEMCMPEQ)
+#endif
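
For readers following the series, a short note on the contract that makes
optimization 1) in the commit message valid: __memcmpeq only has to report
equal versus not-equal, not the sign of the first differing byte. Below is a
minimal C sketch of that contract; the name memcmpeq_ref and the byte-wise
loop are purely illustrative and are not the glibc fallback implementation.

#include <stddef.h>

/* Reference semantics: return zero iff the first n bytes of s1 and s2
   are equal; any non-zero value may be returned otherwise.  */
static int
memcmpeq_ref (const void *s1, const void *s2, size_t n)
{
  const unsigned char *p1 = s1;
  const unsigned char *p2 = s2;
  unsigned char diff = 0;

  /* OR together all byte differences; a single mismatch anywhere
     leaves diff non-zero, and no ordering information is computed.  */
  for (size_t i = 0; i < n; i++)
    diff |= (unsigned char) (p1[i] ^ p2[i]);
  return diff;
}

The ymm loop in the patch is the vector form of this reduction: VPCMPEQ
produces all-ones lanes for matching bytes, the vpand chain combines four
vectors, and the final incl of the 32-bit vpmovmskb result overflows to zero
(setting the zero flag) only when every byte matched.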