X86-64: Add _dl_runtime_resolve_avx[512]_opt [BZ #20508]

Message ID CAMe9rOoDp7XYwLdDetFNghAdDo_zj-4342LKV4i0zCH36aBWtw@mail.gmail.com
State New, archived

Commit Message

H.J. Lu Aug. 30, 2016, 8:30 p.m. UTC
  On Mon, Aug 29, 2016 at 5:01 PM, H.J. Lu <hjl.tools@gmail.com> wrote:
> On Mon, Aug 29, 2016 at 4:07 PM, Richard Henderson <rth@twiddle.net> wrote:
>> On 08/26/2016 10:18 AM, H.J. Lu wrote:
>>>
>>> +       vpcmpeqd %xmm8, %xmm8, %xmm8
>>> +       vorpd %ymm9, %ymm10, %ymm10
>>> +       vptest %ymm10, %ymm8
>>
>>
>> No need to create a mask of all -1; use vptest ymm10, ymm10.
>>
>
> ymm8 isn't all -1.  Only the lower 128 bits are all -1:
>
>
> (gdb) p/x $ymm8
> $4 = {v8_float = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, v4_double = {
>     0x8000000000000000, 0x8000000000000000, 0x0, 0x0}, v32_int8 = {
>     0xff <repeats 16 times>, 0x0 <repeats 16 times>}, v16_int16 = {0xffff,
>     0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0, 0x0, 0x0,
>     0x0, 0x0, 0x0, 0x0, 0x0}, v8_int32 = {0xffffffff, 0xffffffff, 0xffffffff,
>     0xffffffff, 0x0, 0x0, 0x0, 0x0}, v4_int64 = {0xffffffffffffffff,
>     0xffffffffffffffff, 0x0, 0x0}, v2_int128 = {
>     0xffffffffffffffffffffffffffffffff, 0x00000000000000000000000000000000}}
> (gdb)
>
> ymm10 (ymm0|..|ymm7) has
>
> (gdb) p/x $ymm10
> $2 = {v8_float = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, v4_double = {
>     0x8000000000000000, 0x8000000000000000, 0x0, 0x0}, v32_int8 = {0x6d,
>     0x79, 0x72, 0x6f, 0x7f, 0x74, 0x6f, 0x73, 0x77, 0x6f, 0x6f, 0x67, 0x6f,
>     0xff, 0x6f, 0xff, 0x0 <repeats 16 times>}, v16_int16 = {0x796d, 0x6f72,
>     0x747f, 0x736f, 0x6f77, 0x676f, 0xff6f, 0xff6f, 0x0, 0x0, 0x0, 0x0, 0x0,
>     0x0, 0x0, 0x0}, v8_int32 = {0x6f72796d, 0x736f747f, 0x676f6f77,
>     0xff6fff6f, 0x0, 0x0, 0x0, 0x0}, v4_int64 = {0x736f747f6f72796d,
>     0xff6fff6f676f6f77, 0x0, 0x0}, v2_int128 = {
>     0xff6fff6f676f6f77736f747f6f72796d, 0x00000000000000000000000000000000}}
>
> Since
>
> vptest %ymm10, %ymm8
>
> IF (SRC[255:0] BITWISE AND NOT DEST[255:0] = 0)
> THEN CF = 1;
> ELSE CF = 0;
>
> this ignores the lower 128 bits of ymm10 and sets CF = 0
> only if the upper 128 bits of ymm10 aren't zero.  If we use
>
> vptest ymm10, ymm10
>
> CF is always 1 and we will always preserve ymm0-ymm7 even
> when the upper 128 bits are zero.
>

Here is the updated patch to add PRESERVE_BND_REGS_PREFIX
before branches.  Otherwise bound registers will be cleared.  OK
for master?
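
For readers following the VPTEST discussion quoted above, the carry-flag
test can be modelled with a small C sketch (an illustration only, not code
from the patch).  It assumes the usual VPTEST definition, CF = ((SRC AND
NOT DEST) == 0) over all 256 bits, with AT&T operand order
"vptest %src, %dest":

#include <stdint.h>
#include <stdio.h>

/* Model a 256-bit register as four 64-bit lanes.  */
typedef struct { uint64_t q[4]; } ymm_t;

/* CF as computed by VPTEST: 1 iff (src AND NOT dest) == 0.  */
static int
vptest_cf (ymm_t dest, ymm_t src)
{
  for (int i = 0; i < 4; i++)
    if (src.q[i] & ~dest.q[i])
      return 0;
  return 1;
}

int
main (void)
{
  /* ymm8 after "vpcmpeqd %xmm8, %xmm8, %xmm8": the VEX.128 encoding
     sets the lower 128 bits to all ones and zeroes the upper 128.  */
  ymm_t ymm8 = { { ~0ULL, ~0ULL, 0, 0 } };

  /* ymm10 = ymm0 | ... | ymm7 with only the lower 128 bits non-zero
     (the values from the gdb dump above).  */
  ymm_t low_only = { { 0x736f747f6f72796dULL, 0xff6fff6f676f6f77ULL, 0, 0 } };
  ymm_t upper_set = low_only;
  upper_set.q[2] = 1;           /* Same, but with a live upper lane.  */

  /* "vptest %ymm10, %ymm8": CF reflects only the upper 128 bits of
     ymm10, so the SSE path is taken when they are zero (CF = 1) and
     the full-YMM path when they are not (CF = 0, jnc taken).  */
  printf ("CF, mask vs low-only ymm10:  %d\n", vptest_cf (ymm8, low_only));
  printf ("CF, mask vs upper-set ymm10: %d\n", vptest_cf (ymm8, upper_set));

  /* "vptest %ymm10, %ymm10" would always give CF = 1, which is why the
     all-ones lower-128-bit mask cannot be dropped.  */
  printf ("CF, ymm10 vs itself:         %d\n", vptest_cf (upper_set, upper_set));
  return 0;
}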
  

Comments

H.J. Lu Sept. 1, 2016, 9:52 p.m. UTC | #1
On Tue, Aug 30, 2016 at 1:30 PM, H.J. Lu <hjl.tools@gmail.com> wrote:
> On Mon, Aug 29, 2016 at 5:01 PM, H.J. Lu <hjl.tools@gmail.com> wrote:
>> On Mon, Aug 29, 2016 at 4:07 PM, Richard Henderson <rth@twiddle.net> wrote:
>>> On 08/26/2016 10:18 AM, H.J. Lu wrote:
>>>>
>>>> +       vpcmpeqd %xmm8, %xmm8, %xmm8
>>>> +       vorpd %ymm9, %ymm10, %ymm10
>>>> +       vptest %ymm10, %ymm8
>>>
>>>
>>> No need to create a mask of all -1; use vptest ymm10, ymm10.
>>>
>>
>> ymm8 isn't all -1.  Only the lower 128 bits are all -1:
>>
>>
>> (gdb) p/x $ymm8
>> $4 = {v8_float = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, v4_double = {
>>     0x8000000000000000, 0x8000000000000000, 0x0, 0x0}, v32_int8 = {
>>     0xff <repeats 16 times>, 0x0 <repeats 16 times>}, v16_int16 = {0xffff,
>>     0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0, 0x0, 0x0,
>>     0x0, 0x0, 0x0, 0x0, 0x0}, v8_int32 = {0xffffffff, 0xffffffff, 0xffffffff,
>>     0xffffffff, 0x0, 0x0, 0x0, 0x0}, v4_int64 = {0xffffffffffffffff,
>>     0xffffffffffffffff, 0x0, 0x0}, v2_int128 = {
>>     0xffffffffffffffffffffffffffffffff, 0x00000000000000000000000000000000}}
>> (gdb)
>>
>> ymm10 (ymm0|..|ymm7) has
>>
>> (gdb) p/x $ymm10
>> $2 = {v8_float = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, v4_double = {
>>     0x8000000000000000, 0x8000000000000000, 0x0, 0x0}, v32_int8 = {0x6d,
>>     0x79, 0x72, 0x6f, 0x7f, 0x74, 0x6f, 0x73, 0x77, 0x6f, 0x6f, 0x67, 0x6f,
>>     0xff, 0x6f, 0xff, 0x0 <repeats 16 times>}, v16_int16 = {0x796d, 0x6f72,
>>     0x747f, 0x736f, 0x6f77, 0x676f, 0xff6f, 0xff6f, 0x0, 0x0, 0x0, 0x0, 0x0,
>>     0x0, 0x0, 0x0}, v8_int32 = {0x6f72796d, 0x736f747f, 0x676f6f77,
>>     0xff6fff6f, 0x0, 0x0, 0x0, 0x0}, v4_int64 = {0x736f747f6f72796d,
>>     0xff6fff6f676f6f77, 0x0, 0x0}, v2_int128 = {
>>     0xff6fff6f676f6f77736f747f6f72796d, 0x00000000000000000000000000000000}}
>>
>> Since
>>
>> vptest %ymm10, %ymm8
>>
>> IF (SRC[255:0] BITWISE AND NOT DEST[255:0] = 0)
>> THEN CF = 1;
>> ELSE CF = 0;
>>
>> this ignores the lower 128 bits of ymm10 and sets CF = 0
>> only if the upper 128 bits of ymm10 aren't zero.  If we use
>>
>> vptest ymm10, ymm10
>>
>> CF is always 1 and we will always preserve ymm0-ymm7 even
>> when the upper 128 bits are zero.
>>
>
> Here is the updated patch to add PRESERVE_BND_REGS_PREFIX
> before branches.  Otherwise bound registers will be cleared.  OK
> for master?
>

Any comments? I will check it in next week if there is no objection.
  
H.J. Lu Sept. 27, 2016, 5:35 p.m. UTC | #2
On Thu, Sep 1, 2016 at 2:52 PM, H.J. Lu <hjl.tools@gmail.com> wrote:
> On Tue, Aug 30, 2016 at 1:30 PM, H.J. Lu <hjl.tools@gmail.com> wrote:
>> On Mon, Aug 29, 2016 at 5:01 PM, H.J. Lu <hjl.tools@gmail.com> wrote:
>>> On Mon, Aug 29, 2016 at 4:07 PM, Richard Henderson <rth@twiddle.net> wrote:
>>>> On 08/26/2016 10:18 AM, H.J. Lu wrote:
>>>>>
>>>>> +       vpcmpeqd %xmm8, %xmm8, %xmm8
>>>>> +       vorpd %ymm9, %ymm10, %ymm10
>>>>> +       vptest %ymm10, %ymm8
>>>>
>>>>
>>>> No need to create a mask of all -1; use vptest ymm10, ymm10.
>>>>
>>>
>>> ymm8 isn't all -1.  Only the lower 128 bits are all -1:
>>>
>>>
>>> (gdb) p/x $ymm8
>>> $4 = {v8_float = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, v4_double = {
>>>     0x8000000000000000, 0x8000000000000000, 0x0, 0x0}, v32_int8 = {
>>>     0xff <repeats 16 times>, 0x0 <repeats 16 times>}, v16_int16 = {0xffff,
>>>     0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0, 0x0, 0x0,
>>>     0x0, 0x0, 0x0, 0x0, 0x0}, v8_int32 = {0xffffffff, 0xffffffff, 0xffffffff,
>>>     0xffffffff, 0x0, 0x0, 0x0, 0x0}, v4_int64 = {0xffffffffffffffff,
>>>     0xffffffffffffffff, 0x0, 0x0}, v2_int128 = {
>>>     0xffffffffffffffffffffffffffffffff, 0x00000000000000000000000000000000}}
>>> (gdb)
>>>
>>> ymm10 (ymm0|..|ymm7) has
>>>
>>> (gdb) p/x $ymm10
>>> $2 = {v8_float = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, v4_double = {
>>>     0x8000000000000000, 0x8000000000000000, 0x0, 0x0}, v32_int8 = {0x6d,
>>>     0x79, 0x72, 0x6f, 0x7f, 0x74, 0x6f, 0x73, 0x77, 0x6f, 0x6f, 0x67, 0x6f,
>>>     0xff, 0x6f, 0xff, 0x0 <repeats 16 times>}, v16_int16 = {0x796d, 0x6f72,
>>>     0x747f, 0x736f, 0x6f77, 0x676f, 0xff6f, 0xff6f, 0x0, 0x0, 0x0, 0x0, 0x0,
>>>     0x0, 0x0, 0x0}, v8_int32 = {0x6f72796d, 0x736f747f, 0x676f6f77,
>>>     0xff6fff6f, 0x0, 0x0, 0x0, 0x0}, v4_int64 = {0x736f747f6f72796d,
>>>     0xff6fff6f676f6f77, 0x0, 0x0}, v2_int128 = {
>>>     0xff6fff6f676f6f77736f747f6f72796d, 0x00000000000000000000000000000000}}
>>>
>>> Since
>>>
>>> vptest %ymm10, %ymm8
>>>
>>> IF (SRC[255:0] BITWISE AND NOT DEST[255:0] = 0)
>>> THEN CF = 1;
>>> ELSE CF = 0;
>>>
>>> this ignores the lower 128 bits of ymm10 and sets CF = 0
>>> only if the upper 128 bits of ymm10 aren't zero.  If we use
>>>
>>> vptest ymm10, ymm10
>>>
>>> CF is always 1 and we will always preserve ymm0-ymm7 even
>>> when the upper 128 bits are zero.
>>>
>>
>> Here is the updated patch to add PRESERVE_BND_REGS_PREFIX
>> before branches.  Otherwise bound registers will be cleared.  OK
>> for master?
>>
>
> Any comments? I will check it in next week if there is no objection.

I'd like to backport it to 2.23 and 2.24 branches.  Any objections?
  
Florian Weimer Oct. 4, 2016, 10:52 a.m. UTC | #3
On 09/27/2016 07:35 PM, H.J. Lu wrote:

>> Any comments? I will check it in next week if there is no objection.
>
> I'd like to backport it to 2.23 and 2.24 branches.  Any objections?

Just this change, or the requirement for an AVX512F-capable assembler as 
well?

Thanks,
Florian
  
H.J. Lu Oct. 4, 2016, 2:47 p.m. UTC | #4
On Tue, Oct 4, 2016 at 3:52 AM, Florian Weimer <fweimer@redhat.com> wrote:
> On 09/27/2016 07:35 PM, H.J. Lu wrote:
>
>>> Any comments? I will check it in next week if there is no objection.
>>
>>
>> I'd like to backport it to 2.23 and 2.24 branches.  Any objections?
>
>
> Just this change, or the requirement for an AVX512F-capable assembler as
> well?
>

Good question.  This is also needed:

commit f43cb35c9b3c35addc6dc0f1427caf51786ca1d2
Author: H.J. Lu <hjl.tools@gmail.com>
Date:   Fri Jul 1 05:54:43 2016 -0700

    Require binutils 2.24 to build x86-64 glibc [BZ #20139]

    If assembler doesn't support AVX512DQ, _dl_runtime_resolve_avx is used
    to save the first 8 vector registers, which only saves the lower 256
    bits of vector register, for lazy binding.  When it is called on AVX512
    platform, the upper 256 bits of ZMM registers are clobbered.  Parameters
    passed in ZMM registers will be wrong when the function is called the
    first time.  This patch requires binutils 2.24, whose assembler can store
    and load ZMM registers, to build x86-64 glibc.  Since mathvec library
    needs assembler support for AVX512DQ,  we disable mathvec if assembler
    doesn't support AVX512DQ.
  
Adhemerval Zanella Netto Oct. 4, 2016, 3:18 p.m. UTC | #5
On 04/10/2016 11:47, H.J. Lu wrote:
> On Tue, Oct 4, 2016 at 3:52 AM, Florian Weimer <fweimer@redhat.com> wrote:
>> On 09/27/2016 07:35 PM, H.J. Lu wrote:
>>
>>>> Any comments? I will check it in next week if there is no objection.
>>>
>>>
>>> I'd like to backport it to 2.23 and 2.24 branches.  Any objections?
>>
>>
>> Just this change, or the requirement for an AVX512F-capable assembler as
>> well?
>>
> 
> Good question.  This is also needed:
> 
> commit f43cb35c9b3c35addc6dc0f1427caf51786ca1d2
> Author: H.J. Lu <hjl.tools@gmail.com>
> Date:   Fri Jul 1 05:54:43 2016 -0700
> 
>     Require binutils 2.24 to build x86-64 glibc [BZ #20139]
> 
>     If assembler doesn't support AVX512DQ, _dl_runtime_resolve_avx is used
>     to save the first 8 vector registers, which only saves the lower 256
>     bits of vector register, for lazy binding.  When it is called on AVX512
>     platform, the upper 256 bits of ZMM registers are clobbered.  Parameters
>     passed in ZMM registers will be wrong when the function is called the
>     first time.  This patch requires binutils 2.24, whose assembler can store
>     and load ZMM registers, to build x86-64 glibc.  Since mathvec library
>     needs assembler support for AVX512DQ,  we disable mathvec if assembler
>     doesn't support AVX512DQ.

This is ok for 2.23 and 2.24.
  
Florian Weimer Oct. 4, 2016, 3:24 p.m. UTC | #6
On 10/04/2016 04:47 PM, H.J. Lu wrote:
> On Tue, Oct 4, 2016 at 3:52 AM, Florian Weimer <fweimer@redhat.com> wrote:
>> On 09/27/2016 07:35 PM, H.J. Lu wrote:
>>
>>>> Any comments? I will check it in next week if there is no objection.
>>>
>>>
>>> I'd like to backport it to 2.23 and 2.24 branches.  Any objections?
>>
>>
>> Just this change, or the requirement for an AVX512F-capable assembler as
>> well?
>>
>
> Good question.  This is also needed:
>
> commit f43cb35c9b3c35addc6dc0f1427caf51786ca1d2
> Author: H.J. Lu <hjl.tools@gmail.com>
> Date:   Fri Jul 1 05:54:43 2016 -0700
>
>     Require binutils 2.24 to build x86-64 glibc [BZ #20139]
>
>     If assembler doesn't support AVX512DQ, _dl_runtime_resolve_avx is used
>     to save the first 8 vector registers, which only saves the lower 256
>     bits of vector register, for lazy binding.  When it is called on AVX512
>     platform, the upper 256 bits of ZMM registers are clobbered.  Parameters
>     passed in ZMM registers will be wrong when the function is called the
>     first time.  This patch requires binutils 2.24, whose assembler can store
>     and load ZMM registers, to build x86-64 glibc.  Since mathvec library
>     needs assembler support for AVX512DQ,  we disable mathvec if assembler
>     doesn't support AVX512DQ.

That's not really backportable, I'm afraid.  Our users don't expect we 
break builds in this way.

Florian
  
H.J. Lu Oct. 4, 2016, 3:34 p.m. UTC | #7
On Tue, Oct 4, 2016 at 8:24 AM, Florian Weimer <fweimer@redhat.com> wrote:
> On 10/04/2016 04:47 PM, H.J. Lu wrote:
>>
>> On Tue, Oct 4, 2016 at 3:52 AM, Florian Weimer <fweimer@redhat.com> wrote:
>>>
>>> On 09/27/2016 07:35 PM, H.J. Lu wrote:
>>>
>>>>> Any comments? I will check it in next week if there is no objection.
>>>>
>>>>
>>>>
>>>> I'd like to backport it to 2.23 and 2.24 branches.  Any objections?
>>>
>>>
>>>
>>> Just this change, or the requirement for an AVX512F-capable assembler as
>>> well?
>>>
>>
>> Good question.  This is also needed:
>>
>> commit f43cb35c9b3c35addc6dc0f1427caf51786ca1d2
>> Author: H.J. Lu <hjl.tools@gmail.com>
>> Date:   Fri Jul 1 05:54:43 2016 -0700
>>
>>     Require binutils 2.24 to build x86-64 glibc [BZ #20139]
>>
>>     If assembler doesn't support AVX512DQ, _dl_runtime_resolve_avx is used
>>     to save the first 8 vector registers, which only saves the lower 256
>>     bits of vector register, for lazy binding.  When it is called on
>> AVX512
>>     platform, the upper 256 bits of ZMM registers are clobbered.
>> Parameters
>>     passed in ZMM registers will be wrong when the function is called the
>>     first time.  This patch requires binutils 2.24, whose assembler can
>> store
>>     and load ZMM registers, to build x86-64 glibc.  Since mathvec library
>>     needs assembler support for AVX512DQ,  we disable mathvec if assembler
>>     doesn't support AVX512DQ.
>
>
> That's not really backportable, I'm afraid.  Our users don't expect we break
> builds in this way.
>

Who are those users?
  
Florian Weimer Oct. 4, 2016, 3:48 p.m. UTC | #8
On 10/04/2016 05:34 PM, H.J. Lu wrote:
> On Tue, Oct 4, 2016 at 8:24 AM, Florian Weimer <fweimer@redhat.com> wrote:
>> On 10/04/2016 04:47 PM, H.J. Lu wrote:
>>>
>>> On Tue, Oct 4, 2016 at 3:52 AM, Florian Weimer <fweimer@redhat.com> wrote:
>>>>
>>>> On 09/27/2016 07:35 PM, H.J. Lu wrote:
>>>>
>>>>>> Any comments? I will check it in next week if there is no objection.
>>>>>
>>>>>
>>>>>
>>>>> I'd like to backport it to 2.23 and 2.24 branches.  Any objections?
>>>>
>>>>
>>>>
>>>> Just this change, or the requirement for an AVX512F-capable assembler as
>>>> well?
>>>>
>>>
>>> Good question.  This is also needed:
>>>
>>> commit f43cb35c9b3c35addc6dc0f1427caf51786ca1d2
>>> Author: H.J. Lu <hjl.tools@gmail.com>
>>> Date:   Fri Jul 1 05:54:43 2016 -0700
>>>
>>>     Require binutils 2.24 to build x86-64 glibc [BZ #20139]

>> That's not really backportable, I'm afraid.  Our users don't expect we break
>> builds in this way.
>>
>
> Who are those users?

We don't know, really.  But moving forward the baseline binutils 
requirement in a stable release really contradicts what a stable release 
is about.

Florian
  
H.J. Lu Oct. 4, 2016, 4:08 p.m. UTC | #9
On Tue, Oct 4, 2016 at 8:48 AM, Florian Weimer <fweimer@redhat.com> wrote:
> On 10/04/2016 05:34 PM, H.J. Lu wrote:
>>
>> On Tue, Oct 4, 2016 at 8:24 AM, Florian Weimer <fweimer@redhat.com> wrote:
>>>
>>> On 10/04/2016 04:47 PM, H.J. Lu wrote:
>>>>
>>>>
>>>> On Tue, Oct 4, 2016 at 3:52 AM, Florian Weimer <fweimer@redhat.com>
>>>> wrote:
>>>>>
>>>>>
>>>>> On 09/27/2016 07:35 PM, H.J. Lu wrote:
>>>>>
>>>>>>> Any comments? I will check it in next week if there is no objection.
>>>>>>
>>>>>>
>>>>>>
>>>>>>
>>>>>> I'd like to backport it to 2.23 and 2.24 branches.  Any objections?
>>>>>
>>>>>
>>>>>
>>>>>
>>>>> Just this change, or the requirement for an AVX512F-capable assembler
>>>>> as
>>>>> well?
>>>>>
>>>>
>>>> Good question.  This is also needed:
>>>>
>>>> commit f43cb35c9b3c35addc6dc0f1427caf51786ca1d2
>>>> Author: H.J. Lu <hjl.tools@gmail.com>
>>>> Date:   Fri Jul 1 05:54:43 2016 -0700
>>>>
>>>>     Require binutils 2.24 to build x86-64 glibc [BZ #20139]
>
>
>>> That's not really backportable, I'm afraid.  Our users don't expect we
>>> break
>>> builds in this way.
>>>
>>
>> Who are those users?
>
>
> We don't know, really.  But moving forward the baseline binutils requirement
> in a stable release really contradicts what a stable release is about.
>
>

Do our users expect a broken glibc binary from a stable release on an AVX512
machine?
  
Adhemerval Zanella Netto Oct. 4, 2016, 6:13 p.m. UTC | #10
On 04/10/2016 13:08, H.J. Lu wrote:
> On Tue, Oct 4, 2016 at 8:48 AM, Florian Weimer <fweimer@redhat.com> wrote:
>> On 10/04/2016 05:34 PM, H.J. Lu wrote:
>>>
>>> On Tue, Oct 4, 2016 at 8:24 AM, Florian Weimer <fweimer@redhat.com> wrote:
>>>>
>>>> On 10/04/2016 04:47 PM, H.J. Lu wrote:
>>>>>
>>>>>
>>>>> On Tue, Oct 4, 2016 at 3:52 AM, Florian Weimer <fweimer@redhat.com>
>>>>> wrote:
>>>>>>
>>>>>>
>>>>>> On 09/27/2016 07:35 PM, H.J. Lu wrote:
>>>>>>
>>>>>>>> Any comments? I will check it in next week if there is no objection.
>>>>>>>
>>>>>>>
>>>>>>>
>>>>>>>
>>>>>>> I'd like to backport it to 2.23 and 2.24 branches.  Any objections?
>>>>>>
>>>>>>
>>>>>>
>>>>>>
>>>>>> Just this change, or the requirement for an AVX512F-capable assembler
>>>>>> as
>>>>>> well?
>>>>>>
>>>>>
>>>>> Good question.  This is also needed:
>>>>>
>>>>> commit f43cb35c9b3c35addc6dc0f1427caf51786ca1d2
>>>>> Author: H.J. Lu <hjl.tools@gmail.com>
>>>>> Date:   Fri Jul 1 05:54:43 2016 -0700
>>>>>
>>>>>     Require binutils 2.24 to build x86-64 glibc [BZ #20139]
>>
>>
>>>> That's not really backportable, I'm afraid.  Our users don't expect we
>>>> break
>>>> builds in this way.
>>>>
>>>
>>> Who are those users?
>>
>>
>> We don't know, really.  But moving forward the baseline binutils requirement
>> in a stable release really contradicts what a stable release is about.
>>
>>
> 
> Do our users expect a broken glibc binary of a stable release on AVX512
> machine?
> 

I think 2.24 is ok since it already contains the BZ#20139 fix.  For 2.23,
although it was not really explicit in NEWS, AVX512 is supposed to be supported
in a set of different implementations (memmove/memcpy/libmvec).  However, my
understanding is that this issue is limited to performance, so I do not see a
pressing need to change a release's requirements for such a change.
  
Florian Weimer Oct. 4, 2016, 7:18 p.m. UTC | #11
On 10/04/2016 08:13 PM, Adhemerval Zanella wrote:
> I think 2.24 it is ok since it contains the BZ#20139 fix already.  For 2.23,
> although it was not really explicit in NEWS, AVX512 is suppose to be supported
> in a set of different implementation (memmove/memcpy/libmvec).  However my
> understanding of this issue is limited to be a performance one, so I do not
> see a pressing matter to change a release requirements for such change.

As far as I understand it, the issue is that the trampoline writes to 
the SSE/AVX/AVX2 registers, which clears the AVX-512F bits (which are 
not saved by the trampoline).

Intel introduced AVX512F in such a manner that you have to upgrade 
kernel and userspace in lock-step, which is of course unrealistic.

Florian
  
Adhemerval Zanella Netto Oct. 4, 2016, 7:54 p.m. UTC | #12
On 04/10/2016 16:18, Florian Weimer wrote:
> On 10/04/2016 08:13 PM, Adhemerval Zanella wrote:
>> I think 2.24 it is ok since it contains the BZ#20139 fix already.  For 2.23,
>> although it was not really explicit in NEWS, AVX512 is suppose to be supported
>> in a set of different implementation (memmove/memcpy/libmvec).  However my
>> understanding of this issue is limited to be a performance one, so I do not
>> see a pressing matter to change a release requirements for such change.
> 
> As far as I understand it, the issue is that the trampoline writes to the SSE/AVX/AVX2 registers, which clears the AVX-512F bits (which are not saved by the trampoline).
> 
> Intel introduced AVX512F in such a manner that you have to upgrade kernel and userspace in lock-step, which is of course unrealistic.
> 
> Florian

Reading the patch and its description, it appears to try to fix the
AVX-SSE transition penalty described by this Intel documentation [1].

A more experienced arch developer could correct me, but my understanding
is that the hardware itself would save/restore the upper AVX 256 and 512
bits when SSE/AVX instructions are mixed together.  Am I missing something
here?

[1] https://software.intel.com/sites/default/files/m/d/4/1/d/8/11MC12_Avoiding_2BAVX-SSE_2BTransition_2BPenalties_2Brh_2Bfinal.pdf
  
H.J. Lu Oct. 4, 2016, 9 p.m. UTC | #13
On Tue, Oct 4, 2016 at 11:13 AM, Adhemerval Zanella
<adhemerval.zanella@linaro.org> wrote:
>
>
> On 04/10/2016 13:08, H.J. Lu wrote:
>> On Tue, Oct 4, 2016 at 8:48 AM, Florian Weimer <fweimer@redhat.com> wrote:
>>> On 10/04/2016 05:34 PM, H.J. Lu wrote:
>>>>
>>>> On Tue, Oct 4, 2016 at 8:24 AM, Florian Weimer <fweimer@redhat.com> wrote:
>>>>>
>>>>> On 10/04/2016 04:47 PM, H.J. Lu wrote:
>>>>>>
>>>>>>
>>>>>> On Tue, Oct 4, 2016 at 3:52 AM, Florian Weimer <fweimer@redhat.com>
>>>>>> wrote:
>>>>>>>
>>>>>>>
>>>>>>> On 09/27/2016 07:35 PM, H.J. Lu wrote:
>>>>>>>
>>>>>>>>> Any comments? I will check it in next week if there is no objection.
>>>>>>>>
>>>>>>>>
>>>>>>>>
>>>>>>>>
>>>>>>>> I'd like to backport it to 2.23 and 2.24 branches.  Any objections?
>>>>>>>
>>>>>>>
>>>>>>>
>>>>>>>
>>>>>>> Just this change, or the requirement for an AVX512F-capable assembler
>>>>>>> as
>>>>>>> well?
>>>>>>>
>>>>>>
>>>>>> Good question.  This is also needed:
>>>>>>
>>>>>> commit f43cb35c9b3c35addc6dc0f1427caf51786ca1d2
>>>>>> Author: H.J. Lu <hjl.tools@gmail.com>
>>>>>> Date:   Fri Jul 1 05:54:43 2016 -0700
>>>>>>
>>>>>>     Require binutils 2.24 to build x86-64 glibc [BZ #20139]
>>>
>>>
>>>>> That's not really backportable, I'm afraid.  Our users don't expect we
>>>>> break
>>>>> builds in this way.
>>>>>
>>>>
>>>> Who are those users?
>>>
>>>
>>> We don't know, really.  But moving forward the baseline binutils requirement
>>> in a stable release really contradicts what a stable release is about.
>>>
>>>
>>
>> Do our users expect a broken glibc binary of a stable release on AVX512
>> machine?
>>
>
> I think 2.24 it is ok since it contains the BZ#20139 fix already.  For 2.23,
> although it was not really explicit in NEWS, AVX512 is suppose to be supported
> in a set of different implementation (memmove/memcpy/libmvec).  However my
> understanding of this issue is limited to be a performance one, so I do not
> see a pressing matter to change a release requirements for such change.

It is a regression from glibc 2.22.
  
Adhemerval Zanella Netto Oct. 4, 2016, 9:18 p.m. UTC | #14
On 04/10/2016 18:00, H.J. Lu wrote:
> On Tue, Oct 4, 2016 at 11:13 AM, Adhemerval Zanella
> <adhemerval.zanella@linaro.org> wrote:
>>
>>
>> On 04/10/2016 13:08, H.J. Lu wrote:
>>> On Tue, Oct 4, 2016 at 8:48 AM, Florian Weimer <fweimer@redhat.com> wrote:
>>>> On 10/04/2016 05:34 PM, H.J. Lu wrote:
>>>>>
>>>>> On Tue, Oct 4, 2016 at 8:24 AM, Florian Weimer <fweimer@redhat.com> wrote:
>>>>>>
>>>>>> On 10/04/2016 04:47 PM, H.J. Lu wrote:
>>>>>>>
>>>>>>>
>>>>>>> On Tue, Oct 4, 2016 at 3:52 AM, Florian Weimer <fweimer@redhat.com>
>>>>>>> wrote:
>>>>>>>>
>>>>>>>>
>>>>>>>> On 09/27/2016 07:35 PM, H.J. Lu wrote:
>>>>>>>>
>>>>>>>>>> Any comments? I will check it in next week if there is no objection.
>>>>>>>>>
>>>>>>>>>
>>>>>>>>>
>>>>>>>>>
>>>>>>>>> I'd like to backport it to 2.23 and 2.24 branches.  Any objections?
>>>>>>>>
>>>>>>>>
>>>>>>>>
>>>>>>>>
>>>>>>>> Just this change, or the requirement for an AVX512F-capable assembler
>>>>>>>> as
>>>>>>>> well?
>>>>>>>>
>>>>>>>
>>>>>>> Good question.  This is also needed:
>>>>>>>
>>>>>>> commit f43cb35c9b3c35addc6dc0f1427caf51786ca1d2
>>>>>>> Author: H.J. Lu <hjl.tools@gmail.com>
>>>>>>> Date:   Fri Jul 1 05:54:43 2016 -0700
>>>>>>>
>>>>>>>     Require binutils 2.24 to build x86-64 glibc [BZ #20139]
>>>>
>>>>
>>>>>> That's not really backportable, I'm afraid.  Our users don't expect we
>>>>>> break
>>>>>> builds in this way.
>>>>>>
>>>>>
>>>>> Who are those users?
>>>>
>>>>
>>>> We don't know, really.  But moving forward the baseline binutils requirement
>>>> in a stable release really contradicts what a stable release is about.
>>>>
>>>>
>>>
>>> Do our users expect a broken glibc binary of a stable release on AVX512
>>> machine?
>>>
>>
>> I think 2.24 it is ok since it contains the BZ#20139 fix already.  For 2.23,
>> although it was not really explicit in NEWS, AVX512 is suppose to be supported
>> in a set of different implementation (memmove/memcpy/libmvec).  However my
>> understanding of this issue is limited to be a performance one, so I do not
>> see a pressing matter to change a release requirements for such change.
> 
> It is a regression from glibc 2.22.

Right, but is it a functional regression that prevents AVX512 binaries from
running correctly on glibc 2.23+, or a performance regression?
  
H.J. Lu Oct. 4, 2016, 9:20 p.m. UTC | #15
On Tue, Oct 4, 2016 at 2:18 PM, Adhemerval Zanella
<adhemerval.zanella@linaro.org> wrote:
>
>
> On 04/10/2016 18:00, H.J. Lu wrote:
>> On Tue, Oct 4, 2016 at 11:13 AM, Adhemerval Zanella
>> <adhemerval.zanella@linaro.org> wrote:
>>>
>>>
>>> On 04/10/2016 13:08, H.J. Lu wrote:
>>>> On Tue, Oct 4, 2016 at 8:48 AM, Florian Weimer <fweimer@redhat.com> wrote:
>>>>> On 10/04/2016 05:34 PM, H.J. Lu wrote:
>>>>>>
>>>>>> On Tue, Oct 4, 2016 at 8:24 AM, Florian Weimer <fweimer@redhat.com> wrote:
>>>>>>>
>>>>>>> On 10/04/2016 04:47 PM, H.J. Lu wrote:
>>>>>>>>
>>>>>>>>
>>>>>>>> On Tue, Oct 4, 2016 at 3:52 AM, Florian Weimer <fweimer@redhat.com>
>>>>>>>> wrote:
>>>>>>>>>
>>>>>>>>>
>>>>>>>>> On 09/27/2016 07:35 PM, H.J. Lu wrote:
>>>>>>>>>
>>>>>>>>>>> Any comments? I will check it in next week if there is no objection.
>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>> I'd like to backport it to 2.23 and 2.24 branches.  Any objections?
>>>>>>>>>
>>>>>>>>>
>>>>>>>>>
>>>>>>>>>
>>>>>>>>> Just this change, or the requirement for an AVX512F-capable assembler
>>>>>>>>> as
>>>>>>>>> well?
>>>>>>>>>
>>>>>>>>
>>>>>>>> Good question.  This is also needed:
>>>>>>>>
>>>>>>>> commit f43cb35c9b3c35addc6dc0f1427caf51786ca1d2
>>>>>>>> Author: H.J. Lu <hjl.tools@gmail.com>
>>>>>>>> Date:   Fri Jul 1 05:54:43 2016 -0700
>>>>>>>>
>>>>>>>>     Require binutils 2.24 to build x86-64 glibc [BZ #20139]
>>>>>
>>>>>
>>>>>>> That's not really backportable, I'm afraid.  Our users don't expect we
>>>>>>> break
>>>>>>> builds in this way.
>>>>>>>
>>>>>>
>>>>>> Who are those users?
>>>>>
>>>>>
>>>>> We don't know, really.  But moving forward the baseline binutils requirement
>>>>> in a stable release really contradicts what a stable release is about.
>>>>>
>>>>>
>>>>
>>>> Do our users expect a broken glibc binary of a stable release on AVX512
>>>> machine?
>>>>
>>>
>>> I think 2.24 it is ok since it contains the BZ#20139 fix already.  For 2.23,
>>> although it was not really explicit in NEWS, AVX512 is suppose to be supported
>>> in a set of different implementation (memmove/memcpy/libmvec).  However my
>>> understanding of this issue is limited to be a performance one, so I do not
>>> see a pressing matter to change a release requirements for such change.
>>
>> It is a regression from glibc 2.22.
>
> Right, but it is functional regression that prevent avx512 binaries to run
> correctly on glibc 2.23+ or a performance regression?

BZ #20508 is a performance regression.
  
Mike Frysinger Dec. 16, 2016, 11:04 p.m. UTC | #16
On 04 Oct 2016 17:48, Florian Weimer wrote:
> On 10/04/2016 05:34 PM, H.J. Lu wrote:
> > On Tue, Oct 4, 2016 at 8:24 AM, Florian Weimer <fweimer@redhat.com> wrote:
> >> On 10/04/2016 04:47 PM, H.J. Lu wrote:
> >>> On Tue, Oct 4, 2016 at 3:52 AM, Florian Weimer <fweimer@redhat.com> wrote:
> >>>> On 09/27/2016 07:35 PM, H.J. Lu wrote:
> >>>>> I'd like to backport it to 2.23 and 2.24 branches.  Any objections?
> >>>>
> >>>> Just this change, or the requirement for an AVX512F-capable assembler as
> >>>> well?
> >>>
> >>> Good question.  This is also needed:
> >>>
> >>> commit f43cb35c9b3c35addc6dc0f1427caf51786ca1d2
> >>> Author: H.J. Lu <hjl.tools@gmail.com>
> >>> Date:   Fri Jul 1 05:54:43 2016 -0700
> >>>
> >>>     Require binutils 2.24 to build x86-64 glibc [BZ #20139]
> >>
> >> That's not really backportable, I'm afraid.  Our users don't expect we break
> >> builds in this way.
> >
> > Who are those users?
> 
> We don't know, really.  But moving forward the baseline binutils 
> requirement in a stable release really contradicts what a stable release 
> is about.

old post, but i agree here.  our wiki doesn't cover this explicitly that
i can see, but it does:
  Usually, the interested committers have discretion over which bugfixes to
  pick for back-porting, but if discussion arises, general consensus of the
  community is sought, the default choice being to err on the conservative
  side.  Patch backports to stable branches are discussed on libc-stable,
  and any patch on master that doesn't change ABI or API is immediately
  suitable for backporting to a stable branch.

i think we should add "cannot change INSTALL requirements" to the list of
things that are inappropriate.
-mike
  

Patch

From fdb9777e1d770446972f46a80ebfa59d522a93f1 Mon Sep 17 00:00:00 2001
From: "H.J. Lu" <hjl.tools@gmail.com>
Date: Tue, 23 Aug 2016 09:09:32 -0700
Subject: [PATCH] X86-64: Add _dl_runtime_resolve_avx[512]_{opt|slow} [BZ
 #20508]

There is a transition penalty when SSE instructions are mixed with 256-bit
AVX or 512-bit AVX512 load instructions.  Since _dl_runtime_resolve_avx
and _dl_runtime_profile_avx512 save/restore 256-bit YMM/512-bit ZMM
registers, there is a transition penalty when SSE instructions are used
with lazy binding on AVX and AVX512 processors.

To avoid the SSE transition penalty, if only the lower 128 bits of the first
8 vector registers are non-zero, we can preserve %xmm0 - %xmm7 registers
with the zero upper bits.

For AVX and AVX512 processors which support XGETBV with ECX == 1, we can
use XGETBV with ECX == 1 to check if the upper 128 bits of YMM registers
or the upper 256 bits of ZMM registers are zero.  We can restore only the
non-zero portion of vector registers with AVX/AVX512 load instructions
which will zero-extend upper bits of vector registers.
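
As a rough sketch (not the patch itself), the XGETBV-with-ECX == 1 test can
be written in C with inline assembly.  The CPUID bit checked first is the
one the cpu-features.c hunk below tests; the XINUSE bit positions used here
(bit 2 for the YMM upper halves, bit 6 for the upper 256 bits of ZMM0-ZMM15)
and the use of GCC's <cpuid.h> helpers are assumptions of the sketch:

#include <stdint.h>
#include <stdio.h>
#include <cpuid.h>              /* GCC: __get_cpuid, __cpuid_count.  */

/* XGETBV with ECX == 1 returns XCR0 AND XINUSE: the state components
   that are not in their initial (zero) configuration.  */
static uint64_t
xgetbv1 (void)
{
  uint32_t eax, edx;
  __asm__ volatile ("xgetbv" : "=a" (eax), "=d" (edx) : "c" (1));
  return ((uint64_t) edx << 32) | eax;
}

int
main (void)
{
  unsigned int eax, ebx, ecx, edx;

  /* CPUID.(EAX=0DH, ECX=1):EAX bit 2 advertises XGETBV with ECX == 1,
     as checked in the init_cpu_features hunk below.  */
  if (!__get_cpuid (0, &eax, &ebx, &ecx, &edx) || eax < 0xd)
    {
      puts ("no CPUID leaf 0xd");
      return 0;
    }
  __cpuid_count (0xd, 1, eax, ebx, ecx, edx);
  if (!(eax & (1 << 2)))
    {
      puts ("XGETBV with ECX == 1 unsupported: the _slow path applies");
      return 0;
    }

  uint64_t xinuse = xgetbv1 ();

  /* This mirrors the decisions the _opt trampolines make before
     choosing how much vector state to preserve.  */
  if (xinuse & (1ULL << 6))
    puts ("ZMM state in use: preserve the full ZMM registers");
  else if (xinuse & (1ULL << 2))
    puts ("YMM state in use: preserving YMM registers is enough");
  else
    puts ("only XMM state in use: preserving XMM registers is enough");
  return 0;
}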

This patch adds _dl_runtime_resolve_sse_vex which saves and restores
XMM registers with 128-bit AVX store/load instructions.  It is used to
preserve YMM/ZMM registers when only the lower 128 bits are non-zero.
_dl_runtime_resolve_avx_opt and _dl_runtime_resolve_avx512_opt are added
and used on AVX/AVX512 processors supporting XGETBV with ECX == 1 so
that we store and load only the non-zero portion of vector registers.
This avoids the SSE transition penalty caused by _dl_runtime_resolve_avx and
_dl_runtime_profile_avx512 when only the lower 128 bits of vector
registers are used.

_dl_runtime_resolve_avx_slow is added and used for AVX processors which
don't support XGETBV with ECX == 1.  Since there is no SSE transition
penalty on AVX512 processors which don't support XGETBV with ECX == 1,
_dl_runtime_resolve_avx512_slow isn't provided.
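
Read together with the dl-machine.h hunk below, the resolver selection can
be summarized by the following illustrative C sketch; the plain int
arguments stand in for glibc's HAS_ARCH_FEATURE checks and the strings for
the trampoline entry points, so this is a reading aid rather than the
actual code:

#include <stdio.h>

/* Which lazy-binding resolver elf_machine_runtime_setup installs,
   given the feature bits set up in cpu-features.c.  */
static const char *
select_resolver (int avx512f_usable, int avx_usable,
                 int use_dl_runtime_resolve_opt,
                 int use_dl_runtime_resolve_slow)
{
  if (avx512f_usable)
    return use_dl_runtime_resolve_opt
           ? "_dl_runtime_resolve_avx512_opt"
           : "_dl_runtime_resolve_avx512";
  if (avx_usable)
    {
      if (use_dl_runtime_resolve_opt)
        return "_dl_runtime_resolve_avx_opt";
      if (use_dl_runtime_resolve_slow)
        return "_dl_runtime_resolve_avx_slow";
      return "_dl_runtime_resolve_avx";
    }
  return "_dl_runtime_resolve_sse";
}

int
main (void)
{
  /* Intel AVX CPU with XGETBV (ECX == 1): the _opt variant.  */
  printf ("%s\n", select_resolver (0, 1, 1, 1));
  /* Intel AVX CPU without XGETBV (ECX == 1): the _slow variant.  */
  printf ("%s\n", select_resolver (0, 1, 0, 1));
  /* Non-Intel AVX CPU (neither new bit set): behaviour unchanged.  */
  printf ("%s\n", select_resolver (0, 1, 0, 0));
  return 0;
}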

	[BZ #20495]
	[BZ #20508]
	* sysdeps/x86/cpu-features.c (init_cpu_features): For Intel
	processors, set Use_dl_runtime_resolve_slow and set
	Use_dl_runtime_resolve_opt if XGETBV supports ECX == 1.
	* sysdeps/x86/cpu-features.h (bit_arch_Use_dl_runtime_resolve_opt):
	New.
	(bit_arch_Use_dl_runtime_resolve_slow): Likewise.
	(index_arch_Use_dl_runtime_resolve_opt): Likewise.
	(index_arch_Use_dl_runtime_resolve_slow): Likewise.
	* sysdeps/x86_64/dl-machine.h (elf_machine_runtime_setup): Use
	_dl_runtime_resolve_avx512_opt and _dl_runtime_resolve_avx_opt
	if Use_dl_runtime_resolve_opt is set.  Use
	_dl_runtime_resolve_slow if Use_dl_runtime_resolve_slow is set.
	* sysdeps/x86_64/dl-trampoline.S: Include <cpu-features.h>.
	(_dl_runtime_resolve_opt): New.  Defined for AVX and AVX512.
	(_dl_runtime_resolve): Add one for _dl_runtime_resolve_sse_vex.
	* sysdeps/x86_64/dl-trampoline.h (_dl_runtime_resolve_avx_slow):
	New.
	(_dl_runtime_resolve_opt): Likewise.
	(_dl_runtime_profile): Define only if _dl_runtime_profile is
	defined.
---
 sysdeps/x86/cpu-features.c     |  14 ++++++
 sysdeps/x86/cpu-features.h     |   6 +++
 sysdeps/x86_64/dl-machine.h    |  24 +++++++++-
 sysdeps/x86_64/dl-trampoline.S |  20 ++++++++
 sysdeps/x86_64/dl-trampoline.h | 104 ++++++++++++++++++++++++++++++++++++++++-
 5 files changed, 165 insertions(+), 3 deletions(-)

diff --git a/sysdeps/x86/cpu-features.c b/sysdeps/x86/cpu-features.c
index 9ce4b49..11b9af2 100644
--- a/sysdeps/x86/cpu-features.c
+++ b/sysdeps/x86/cpu-features.c
@@ -205,6 +205,20 @@  init_cpu_features (struct cpu_features *cpu_features)
       if (CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable))
 	cpu_features->feature[index_arch_AVX_Fast_Unaligned_Load]
 	  |= bit_arch_AVX_Fast_Unaligned_Load;
+
+      /* To avoid SSE transition penalty, use _dl_runtime_resolve_slow.
+         If XGETBV supports ECX == 1, use _dl_runtime_resolve_opt.  */
+      cpu_features->feature[index_arch_Use_dl_runtime_resolve_slow]
+	|= bit_arch_Use_dl_runtime_resolve_slow;
+      if (cpu_features->max_cpuid >= 0xd)
+	{
+	  unsigned int eax;
+
+	  __cpuid_count (0xd, 1, eax, ebx, ecx, edx);
+	  if ((eax & (1 << 2)) != 0)
+	    cpu_features->feature[index_arch_Use_dl_runtime_resolve_opt]
+	      |= bit_arch_Use_dl_runtime_resolve_opt;
+	}
     }
   /* This spells out "AuthenticAMD".  */
   else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
diff --git a/sysdeps/x86/cpu-features.h b/sysdeps/x86/cpu-features.h
index e891036..bba33e6 100644
--- a/sysdeps/x86/cpu-features.h
+++ b/sysdeps/x86/cpu-features.h
@@ -37,6 +37,8 @@ 
 #define bit_arch_Prefer_No_VZEROUPPER		(1 << 17)
 #define bit_arch_Fast_Unaligned_Copy		(1 << 18)
 #define bit_arch_Prefer_ERMS			(1 << 19)
+#define bit_arch_Use_dl_runtime_resolve_opt	(1 << 20)
+#define bit_arch_Use_dl_runtime_resolve_slow	(1 << 21)
 
 /* CPUID Feature flags.  */
 
@@ -107,6 +109,8 @@ 
 # define index_arch_Prefer_No_VZEROUPPER FEATURE_INDEX_1*FEATURE_SIZE
 # define index_arch_Fast_Unaligned_Copy	FEATURE_INDEX_1*FEATURE_SIZE
 # define index_arch_Prefer_ERMS		FEATURE_INDEX_1*FEATURE_SIZE
+# define index_arch_Use_dl_runtime_resolve_opt FEATURE_INDEX_1*FEATURE_SIZE
+# define index_arch_Use_dl_runtime_resolve_slow FEATURE_INDEX_1*FEATURE_SIZE
 
 
 # if defined (_LIBC) && !IS_IN (nonlib)
@@ -277,6 +281,8 @@  extern const struct cpu_features *__get_cpu_features (void)
 # define index_arch_Prefer_No_VZEROUPPER FEATURE_INDEX_1
 # define index_arch_Fast_Unaligned_Copy	FEATURE_INDEX_1
 # define index_arch_Prefer_ERMS		FEATURE_INDEX_1
+# define index_arch_Use_dl_runtime_resolve_opt FEATURE_INDEX_1
+# define index_arch_Use_dl_runtime_resolve_slow FEATURE_INDEX_1
 
 #endif	/* !__ASSEMBLER__ */
 
diff --git a/sysdeps/x86_64/dl-machine.h b/sysdeps/x86_64/dl-machine.h
index ed0c1a8..c0f0fa1 100644
--- a/sysdeps/x86_64/dl-machine.h
+++ b/sysdeps/x86_64/dl-machine.h
@@ -68,7 +68,10 @@  elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
   Elf64_Addr *got;
   extern void _dl_runtime_resolve_sse (ElfW(Word)) attribute_hidden;
   extern void _dl_runtime_resolve_avx (ElfW(Word)) attribute_hidden;
+  extern void _dl_runtime_resolve_avx_slow (ElfW(Word)) attribute_hidden;
+  extern void _dl_runtime_resolve_avx_opt (ElfW(Word)) attribute_hidden;
   extern void _dl_runtime_resolve_avx512 (ElfW(Word)) attribute_hidden;
+  extern void _dl_runtime_resolve_avx512_opt (ElfW(Word)) attribute_hidden;
   extern void _dl_runtime_profile_sse (ElfW(Word)) attribute_hidden;
   extern void _dl_runtime_profile_avx (ElfW(Word)) attribute_hidden;
   extern void _dl_runtime_profile_avx512 (ElfW(Word)) attribute_hidden;
@@ -118,9 +121,26 @@  elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
 	     indicated by the offset on the stack, and then jump to
 	     the resolved address.  */
 	  if (HAS_ARCH_FEATURE (AVX512F_Usable))
-	    *(ElfW(Addr) *) (got + 2) = (ElfW(Addr)) &_dl_runtime_resolve_avx512;
+	    {
+	      if (HAS_ARCH_FEATURE (Use_dl_runtime_resolve_opt))
+		*(ElfW(Addr) *) (got + 2)
+		  = (ElfW(Addr)) &_dl_runtime_resolve_avx512_opt;
+	      else
+		*(ElfW(Addr) *) (got + 2)
+		  = (ElfW(Addr)) &_dl_runtime_resolve_avx512;
+	    }
 	  else if (HAS_ARCH_FEATURE (AVX_Usable))
-	    *(ElfW(Addr) *) (got + 2) = (ElfW(Addr)) &_dl_runtime_resolve_avx;
+	    {
+	      if (HAS_ARCH_FEATURE (Use_dl_runtime_resolve_opt))
+		*(ElfW(Addr) *) (got + 2)
+		  = (ElfW(Addr)) &_dl_runtime_resolve_avx_opt;
+	      else if (HAS_ARCH_FEATURE (Use_dl_runtime_resolve_slow))
+		*(ElfW(Addr) *) (got + 2)
+		  = (ElfW(Addr)) &_dl_runtime_resolve_avx_slow;
+	      else
+		*(ElfW(Addr) *) (got + 2)
+		  = (ElfW(Addr)) &_dl_runtime_resolve_avx;
+	    }
 	  else
 	    *(ElfW(Addr) *) (got + 2) = (ElfW(Addr)) &_dl_runtime_resolve_sse;
 	}
diff --git a/sysdeps/x86_64/dl-trampoline.S b/sysdeps/x86_64/dl-trampoline.S
index 12f1a5c..39f595e 100644
--- a/sysdeps/x86_64/dl-trampoline.S
+++ b/sysdeps/x86_64/dl-trampoline.S
@@ -18,6 +18,7 @@ 
 
 #include <config.h>
 #include <sysdep.h>
+#include <cpu-features.h>
 #include <link-defines.h>
 
 #ifndef DL_STACK_ALIGNMENT
@@ -86,9 +87,11 @@ 
 #endif
 #define VEC(i)			zmm##i
 #define _dl_runtime_resolve	_dl_runtime_resolve_avx512
+#define _dl_runtime_resolve_opt	_dl_runtime_resolve_avx512_opt
 #define _dl_runtime_profile	_dl_runtime_profile_avx512
 #include "dl-trampoline.h"
 #undef _dl_runtime_resolve
+#undef _dl_runtime_resolve_opt
 #undef _dl_runtime_profile
 #undef VEC
 #undef VMOV
@@ -104,9 +107,11 @@ 
 #endif
 #define VEC(i)			ymm##i
 #define _dl_runtime_resolve	_dl_runtime_resolve_avx
+#define _dl_runtime_resolve_opt	_dl_runtime_resolve_avx_opt
 #define _dl_runtime_profile	_dl_runtime_profile_avx
 #include "dl-trampoline.h"
 #undef _dl_runtime_resolve
+#undef _dl_runtime_resolve_opt
 #undef _dl_runtime_profile
 #undef VEC
 #undef VMOV
@@ -126,3 +131,18 @@ 
 #define _dl_runtime_profile	_dl_runtime_profile_sse
 #undef RESTORE_AVX
 #include "dl-trampoline.h"
+#undef _dl_runtime_resolve
+#undef _dl_runtime_profile
+#undef VMOV
+#undef VMOVA
+
+/* Used by _dl_runtime_resolve_avx_opt/_dl_runtime_resolve_avx512_opt
+   to preserve the full vector registers with zero upper bits.  */
+#define VMOVA			vmovdqa
+#if DL_RUNTIME_RESOLVE_REALIGN_STACK || VEC_SIZE <= DL_STACK_ALIGNMENT
+# define VMOV			vmovdqa
+#else
+# define VMOV			vmovdqu
+#endif
+#define _dl_runtime_resolve	_dl_runtime_resolve_sse_vex
+#include "dl-trampoline.h"
diff --git a/sysdeps/x86_64/dl-trampoline.h b/sysdeps/x86_64/dl-trampoline.h
index 8161f96..d6c7f98 100644
--- a/sysdeps/x86_64/dl-trampoline.h
+++ b/sysdeps/x86_64/dl-trampoline.h
@@ -50,6 +50,105 @@ 
 #endif
 
 	.text
+#ifdef _dl_runtime_resolve_opt
+/* Use the smallest vector registers to preserve the full YMM/ZMM
+   registers to avoid SSE transition penalty.  */
+
+# if VEC_SIZE == 32
+/* Check if the upper 128 bits in %ymm0 - %ymm7 registers are non-zero
+   and preserve %xmm0 - %xmm7 registers with the zero upper bits.  Since
+   there is no SSE transition penalty on AVX512 processors which don't
+   support XGETBV with ECX == 1, _dl_runtime_resolve_avx512_slow isn't
+   provided.   */
+	.globl _dl_runtime_resolve_avx_slow
+	.hidden _dl_runtime_resolve_avx_slow
+	.type _dl_runtime_resolve_avx_slow, @function
+	.align 16
+_dl_runtime_resolve_avx_slow:
+	cfi_startproc
+	cfi_adjust_cfa_offset(16) # Incorporate PLT
+	vorpd %ymm0, %ymm1, %ymm8
+	vorpd %ymm2, %ymm3, %ymm9
+	vorpd %ymm4, %ymm5, %ymm10
+	vorpd %ymm6, %ymm7, %ymm11
+	vorpd %ymm8, %ymm9, %ymm9
+	vorpd %ymm10, %ymm11, %ymm10
+	vpcmpeqd %xmm8, %xmm8, %xmm8
+	vorpd %ymm9, %ymm10, %ymm10
+	vptest %ymm10, %ymm8
+	# Preserve %ymm0 - %ymm7 registers if the upper 128 bits of any
+	# %ymm0 - %ymm7 registers aren't zero.
+	PRESERVE_BND_REGS_PREFIX
+	jnc _dl_runtime_resolve_avx
+	# Use vzeroupper to avoid SSE transition penalty.
+	vzeroupper
+	# Preserve %xmm0 - %xmm7 registers with the zero upper 128 bits
+	# when the upper 128 bits of %ymm0 - %ymm7 registers are zero.
+	PRESERVE_BND_REGS_PREFIX
+	jmp _dl_runtime_resolve_sse_vex
+	cfi_adjust_cfa_offset(-16) # Restore PLT adjustment
+	cfi_endproc
+	.size _dl_runtime_resolve_avx_slow, .-_dl_runtime_resolve_avx_slow
+# endif
+
+/* Use XGETBV with ECX == 1 to check which bits in vector registers are
+   non-zero and only preserve the non-zero lower bits with zero upper
+   bits.  */
+	.globl _dl_runtime_resolve_opt
+	.hidden _dl_runtime_resolve_opt
+	.type _dl_runtime_resolve_opt, @function
+	.align 16
+_dl_runtime_resolve_opt:
+	cfi_startproc
+	cfi_adjust_cfa_offset(16) # Incorporate PLT
+	pushq %rax
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%rax, 0)
+	pushq %rcx
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%rcx, 0)
+	pushq %rdx
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%rdx, 0)
+	movl $1, %ecx
+	xgetbv
+	movl %eax, %r11d
+	popq %rdx
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore (%rdx)
+	popq %rcx
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore (%rcx)
+	popq %rax
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore (%rax)
+# if VEC_SIZE == 32
+	# For YMM registers, check if YMM state is in use.
+	andl $bit_YMM_state, %r11d
+	# Preserve %xmm0 - %xmm7 registers with the zero upper 128 bits if
+	# YMM state isn't in use.
+	PRESERVE_BND_REGS_PREFIX
+	jz _dl_runtime_resolve_sse_vex
+# elif VEC_SIZE == 64
+	# For ZMM registers, check if YMM state and ZMM state are in
+	# use.
+	andl $(bit_YMM_state | bit_ZMM0_15_state), %r11d
+	cmpl $bit_YMM_state, %r11d
+	# Preserve %xmm0 - %xmm7 registers with the zero upper 384 bits if
+	# neither YMM state nor ZMM state are in use.
+	PRESERVE_BND_REGS_PREFIX
+	jl _dl_runtime_resolve_sse_vex
+	# Preserve %ymm0 - %ymm7 registers with the zero upper 256 bits if
+	# ZMM state isn't in use.
+	PRESERVE_BND_REGS_PREFIX
+	je _dl_runtime_resolve_avx
+# else
+#  error Unsupported VEC_SIZE!
+# endif
+	cfi_adjust_cfa_offset(-16) # Restore PLT adjustment
+	cfi_endproc
+	.size _dl_runtime_resolve_opt, .-_dl_runtime_resolve_opt
+#endif
 	.globl _dl_runtime_resolve
 	.hidden _dl_runtime_resolve
 	.type _dl_runtime_resolve, @function
@@ -164,7 +263,10 @@  _dl_runtime_resolve:
 	.size _dl_runtime_resolve, .-_dl_runtime_resolve
 
 
-#ifndef PROF
+/* To preserve %xmm0 - %xmm7 registers, dl-trampoline.h is included
+   twice, for _dl_runtime_resolve_sse and _dl_runtime_resolve_sse_vex.
+   But we don't need another _dl_runtime_profile for XMM registers.  */
+#if !defined PROF && defined _dl_runtime_profile
 # if (LR_VECTOR_OFFSET % VEC_SIZE) != 0
 #  error LR_VECTOR_OFFSET must be multples of VEC_SIZE
 # endif
-- 
2.7.4