From patchwork Thu Jun 30 14:55:14 2016
X-Patchwork-Submitter: "H.J. Lu"
X-Patchwork-Id: 13521
From: "H.J. Lu"
Date: Thu, 30 Jun 2016 07:55:14 -0700
In-Reply-To: <20160629184253.GA3534@intel.com>
References: <20160629184253.GA3534@intel.com>
Subject: Re: [PATCH] Check ERMS in memmove/memcpy/mempcpy/memset
To: GNU C Library <libc-alpha@sourceware.org>

On Wed, Jun 29, 2016 at 11:42 AM, H.J. Lu wrote:
> Although the Enhanced REP MOVSB/STOSB (ERMS) implementations of memmove,
> memcpy, mempcpy and memset aren't used by current processors, this patch
> adds a Prefer_ERMS check to memmove, memcpy, mempcpy and memset so that
> glibc developers can experiment with it using GLIBC_IFUNC.
>
> OK for master?
>
> H.J.
> ---
> 	* sysdeps/x86/cpu-features.c (init_cpu_features): Also
> 	check Prefer_ERMS.
> 	* sysdeps/x86/cpu-features.h (bit_arch_Prefer_ERMS): New.
> 	(index_arch_Prefer_ERMS): Likewise.
> 	* sysdeps/x86_64/multiarch/memcpy.S (__new_memcpy): Return
> 	__memcpy_erms for Prefer_ERMS.
> 	* sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
> 	(__memmove_erms): Enabled for libc.a.
> 	* sysdeps/x86_64/multiarch/memmove.S (__libc_memmove): Return
> 	__memmove_erms for Prefer_ERMS.
> 	* sysdeps/x86_64/multiarch/mempcpy.S (__mempcpy): Return
> 	__mempcpy_erms for Prefer_ERMS.
> 	* sysdeps/x86_64/multiarch/memset.S (memset): Return
> 	__memset_erms for Prefer_ERMS.

I am checking in this patch without the cpu-features.c change.
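The selectors patched below are IFUNC resolvers, so the Prefer_ERMS bit
only changes which entry point a resolver hands back at symbol-binding
time.  A rough C analogue of that selection follows; it is a sketch
only: my_memcpy, my_memcpy_vec, my_memcpy_erms and cpu_prefers_erms are
made-up names standing in for the assembly resolvers and for
HAS_ARCH_FEATURE (Prefer_ERMS).

/* Illustrative only: a user-level analogue of the IFUNC dispatch in
   this patch.  None of these names are glibc symbols.  */
#include <stddef.h>
#include <string.h>

typedef void *(*memcpy_fn) (void *, const void *, size_t);

static void *
my_memcpy_vec (void *dst, const void *src, size_t n)
{
  /* Stand-in for the vectorized copy that is normally selected.  */
  return memcpy (dst, src, n);
}

static void *
my_memcpy_erms (void *dst, const void *src, size_t n)
{
  /* ERMS-style copy: one rep movsb moves RCX bytes from (RSI) to (RDI).  */
  void *ret = dst;
  __asm__ __volatile__ ("rep movsb"
                        : "+D" (dst), "+S" (src), "+c" (n) :: "memory");
  return ret;
}

static int
cpu_prefers_erms (void)
{
  /* glibc reads bit_arch_Prefer_ERMS from its cpu_features bitmap;
     no current CPU sets it, so default to the vector copy.  */
  return 0;
}

/* The resolver runs once, at symbol binding time, and returns the
   implementation the call should be bound to.  */
static memcpy_fn
my_memcpy_resolver (void)
{
  return cpu_prefers_erms () ? my_memcpy_erms : my_memcpy_vec;
}

void *my_memcpy (void *dst, const void *src, size_t n)
     __attribute__ ((ifunc ("my_memcpy_resolver")));

With cpu_prefers_erms () flipped to 1, calls to my_memcpy bind to the
rep movsb variant; that is the whole effect Prefer_ERMS has on the
resolvers below.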
From bdf594e6fa52ab2bf4d632a815181512a769fb7b Mon Sep 17 00:00:00 2001
From: "H.J. Lu"
Date: Mon, 27 Jun 2016 12:27:49 -0700
Subject: [PATCH] Check Prefer_ERMS in memmove/memcpy/mempcpy/memset

Although the Enhanced REP MOVSB/STOSB (ERMS) implementations of memmove,
memcpy, mempcpy and memset aren't used by current processors, this patch
adds a Prefer_ERMS check to memmove, memcpy, mempcpy and memset so that
they can be used in the future.

	* sysdeps/x86/cpu-features.h (bit_arch_Prefer_ERMS): New.
	(index_arch_Prefer_ERMS): Likewise.
	* sysdeps/x86_64/multiarch/memcpy.S (__new_memcpy): Return
	__memcpy_erms for Prefer_ERMS.
	* sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
	(__memmove_erms): Enabled for libc.a.
	* sysdeps/x86_64/multiarch/memmove.S (__libc_memmove): Return
	__memmove_erms for Prefer_ERMS.
	* sysdeps/x86_64/multiarch/mempcpy.S (__mempcpy): Return
	__mempcpy_erms for Prefer_ERMS.
	* sysdeps/x86_64/multiarch/memset.S (memset): Return
	__memset_erms for Prefer_ERMS.
---
 sysdeps/x86/cpu-features.h                            | 3 +++
 sysdeps/x86_64/multiarch/memcpy.S                     | 3 +++
 sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S | 6 +++++-
 sysdeps/x86_64/multiarch/memmove.S                    | 3 +++
 sysdeps/x86_64/multiarch/mempcpy.S                    | 3 +++
 sysdeps/x86_64/multiarch/memset.S                     | 3 +++
 6 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/sysdeps/x86/cpu-features.h b/sysdeps/x86/cpu-features.h
index 2bd9371..97ffe76 100644
--- a/sysdeps/x86/cpu-features.h
+++ b/sysdeps/x86/cpu-features.h
@@ -36,6 +36,7 @@
 #define bit_arch_Prefer_MAP_32BIT_EXEC	(1 << 16)
 #define bit_arch_Prefer_No_VZEROUPPER	(1 << 17)
 #define bit_arch_Fast_Unaligned_Copy	(1 << 18)
+#define bit_arch_Prefer_ERMS		(1 << 19)
 
 /* CPUID Feature flags.  */
 
@@ -105,6 +106,7 @@
 # define index_arch_Prefer_MAP_32BIT_EXEC FEATURE_INDEX_1*FEATURE_SIZE
 # define index_arch_Prefer_No_VZEROUPPER  FEATURE_INDEX_1*FEATURE_SIZE
 # define index_arch_Fast_Unaligned_Copy   FEATURE_INDEX_1*FEATURE_SIZE
+# define index_arch_Prefer_ERMS           FEATURE_INDEX_1*FEATURE_SIZE
 
 
 # if defined (_LIBC) && !IS_IN (nonlib)
@@ -274,6 +276,7 @@ extern const struct cpu_features *__get_cpu_features (void)
 # define index_arch_Prefer_MAP_32BIT_EXEC FEATURE_INDEX_1
 # define index_arch_Prefer_No_VZEROUPPER  FEATURE_INDEX_1
 # define index_arch_Fast_Unaligned_Copy   FEATURE_INDEX_1
+# define index_arch_Prefer_ERMS           FEATURE_INDEX_1
 
 #endif	/* !__ASSEMBLER__ */
 
diff --git a/sysdeps/x86_64/multiarch/memcpy.S b/sysdeps/x86_64/multiarch/memcpy.S
index f6771a4..df7fbac 100644
--- a/sysdeps/x86_64/multiarch/memcpy.S
+++ b/sysdeps/x86_64/multiarch/memcpy.S
@@ -29,6 +29,9 @@
 ENTRY(__new_memcpy)
 	.type	__new_memcpy, @gnu_indirect_function
 	LOAD_RTLD_GLOBAL_RO_RDX
+	lea	__memcpy_erms(%rip), %RAX_LP
+	HAS_ARCH_FEATURE (Prefer_ERMS)
+	jnz	2f
 # ifdef HAVE_AVX512_ASM_SUPPORT
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
index a2cce39..4893ea4 100644
--- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
@@ -150,13 +150,15 @@ L(nop):
 #if defined USE_MULTIARCH && IS_IN (libc)
 END (MEMMOVE_SYMBOL (__memmove, unaligned))
 
-# if VEC_SIZE == 16 && defined SHARED
+# if VEC_SIZE == 16
+#  if defined SHARED
 /* Only used to measure performance of REP MOVSB.  */
 ENTRY (__mempcpy_erms)
 	movq	%rdi, %rax
 	addq	%rdx, %rax
 	jmp	L(start_movsb)
 END (__mempcpy_erms)
+#  endif
 
 ENTRY (__memmove_erms)
 	movq	%rdi, %rax
@@ -181,7 +183,9 @@ L(movsb_backward):
 	cld
 	ret
 END (__memmove_erms)
+#  if defined SHARED
 strong_alias (__memmove_erms, __memcpy_erms)
+#  endif
 # endif
 
 # ifdef SHARED
diff --git a/sysdeps/x86_64/multiarch/memmove.S b/sysdeps/x86_64/multiarch/memmove.S
index 25c3586..8e1c6ac 100644
--- a/sysdeps/x86_64/multiarch/memmove.S
+++ b/sysdeps/x86_64/multiarch/memmove.S
@@ -27,6 +27,9 @@
 ENTRY(__libc_memmove)
 	.type	__libc_memmove, @gnu_indirect_function
 	LOAD_RTLD_GLOBAL_RO_RDX
+	lea	__memmove_erms(%rip), %RAX_LP
+	HAS_ARCH_FEATURE (Prefer_ERMS)
+	jnz	2f
 # ifdef HAVE_AVX512_ASM_SUPPORT
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
diff --git a/sysdeps/x86_64/multiarch/mempcpy.S b/sysdeps/x86_64/multiarch/mempcpy.S
index f9c6df3..4011a1a 100644
--- a/sysdeps/x86_64/multiarch/mempcpy.S
+++ b/sysdeps/x86_64/multiarch/mempcpy.S
@@ -29,6 +29,9 @@
 ENTRY(__mempcpy)
 	.type	__mempcpy, @gnu_indirect_function
 	LOAD_RTLD_GLOBAL_RO_RDX
+	lea	__mempcpy_erms(%rip), %RAX_LP
+	HAS_ARCH_FEATURE (Prefer_ERMS)
+	jnz	2f
 # ifdef HAVE_AVX512_ASM_SUPPORT
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
diff --git a/sysdeps/x86_64/multiarch/memset.S b/sysdeps/x86_64/multiarch/memset.S
index 4e52d8f..2b964a0 100644
--- a/sysdeps/x86_64/multiarch/memset.S
+++ b/sysdeps/x86_64/multiarch/memset.S
@@ -26,6 +26,9 @@
 ENTRY(memset)
 	.type	memset, @gnu_indirect_function
 	LOAD_RTLD_GLOBAL_RO_RDX
+	lea	__memset_erms(%rip), %RAX_LP
+	HAS_ARCH_FEATURE (Prefer_ERMS)
+	jnz	2f
 	lea	__memset_sse2_unaligned_erms(%rip), %RAX_LP
 	HAS_CPU_FEATURE (ERMS)
 	jnz	1f
-- 
2.7.4
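For reference, __memmove_erms in the patch above is essentially a
single rep movsb, plus a backward (std) path for overlapping buffers,
which is what L(movsb_backward) handles.  A rough, illustrative C
equivalent of that forward/backward choice (a sketch only, not the
glibc code):

#include <stddef.h>
#include <stdint.h>

/* Copy with rep movsb; only when the destination overlaps the tail of
   the source, run it backwards with the direction flag set.  */
static void *
erms_memmove (void *dst, const void *src, size_t n)
{
  void *ret = dst;
  if ((uintptr_t) dst - (uintptr_t) src >= n)
    {
      /* Forward copy is safe (no overlap, or dst below src).  */
      __asm__ __volatile__ ("rep movsb"
                            : "+D" (dst), "+S" (src), "+c" (n) :: "memory");
    }
  else
    {
      /* dst overlaps the end of src: copy from the last byte down.  */
      char *d = (char *) dst + n - 1;
      const char *s = (const char *) src + n - 1;
      __asm__ __volatile__ ("std; rep movsb; cld"
                            : "+D" (d), "+S" (s), "+c" (n) :: "memory");
    }
  return ret;
}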