[11/15] x86_64: Avoid lazy relocation of tlsdesc [BZ #27137]

Message ID: 8c6dae9ee45f9af51ce70cb8422582096dbf7ffc.1613390045.git.szabolcs.nagy@arm.com (mailing list archive)
State: Superseded
Delegated to: Adhemerval Zanella Netto
Series: Dynamic TLS related data race fixes

Commit Message

Szabolcs Nagy Feb. 15, 2021, 12:02 p.m. UTC
  Lazy tlsdesc relocation is racy because the static tls optimization and
tlsdesc management operations are done without holding the dlopen lock.

This is similar to commit b7cf203b5c17dd6d9878537d41e0c7cc3d270a67
for aarch64, but it fixes a different race: bug 27137.
---
 sysdeps/x86_64/dl-machine.h | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)
  

Comments

Ben Woodard April 9, 2021, 12:14 a.m. UTC | #1
Don’t you also need to modify elf_machine_runtime_setup? It also has a reference to _dl_tlsdesc_resolve_rela that becomes undefined when you try to compile with your patchset including patch 13, where you remove the code.

To make a test build I just commented it out, but I think this patch should remove that if statement as well.

diff --git a/sysdeps/x86_64/dl-machine.h b/sysdeps/x86_64/dl-machine.h
index 9a876a371e..2b1b36a739 100644
--- a/sysdeps/x86_64/dl-machine.h
+++ b/sysdeps/x86_64/dl-machine.h
@@ -127,9 +127,11 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
        }
     }

-  if (l->l_info[ADDRIDX (DT_TLSDESC_GOT)] && lazy)
-    *(ElfW(Addr)*)(D_PTR (l, l_info[ADDRIDX (DT_TLSDESC_GOT)]) + l->l_addr)
-      = (ElfW(Addr)) &_dl_tlsdesc_resolve_rela;
+  /* Lazy binding of TLSDESC relocations is no longer done so this logic
+     won't apply */
+  /* if (l->l_info[ADDRIDX (DT_TLSDESC_GOT)] && lazy) */
+  /*   *(ElfW(Addr)*)(D_PTR (l, l_info[ADDRIDX (DT_TLSDESC_GOT)]) + l->l_addr) */
+  /*     = (ElfW(Addr)) &_dl_tlsdesc_resolve_rela; */

   return lazy;
 }
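
For reference, a minimal sketch of the removal suggested above (assuming _dl_tlsdesc_resolve_rela and the rest of the lazy TLSDESC machinery are gone entirely after patch 13) would drop the hook instead of commenting it out; the hunk offsets here are illustrative and not taken from a real build:

@@ -127,9 +127,6 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
         }
      }
 
-  if (l->l_info[ADDRIDX (DT_TLSDESC_GOT)] && lazy)
-    *(ElfW(Addr)*)(D_PTR (l, l_info[ADDRIDX (DT_TLSDESC_GOT)]) + l->l_addr)
-      = (ElfW(Addr)) &_dl_tlsdesc_resolve_rela;
 
   return lazy;
 }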


> On Feb 15, 2021, at 4:02 AM, Szabolcs Nagy via Libc-alpha <libc-alpha@sourceware.org> wrote:
> 
> Lazy tlsdesc relocation is racy because the static tls optimization and
> tlsdesc management operations are done without holding the dlopen lock.
> 
> This similar to the commit b7cf203b5c17dd6d9878537d41e0c7cc3d270a67
> for aarch64, but it fixes a different race: bug 27137.
> ---
> sysdeps/x86_64/dl-machine.h | 19 ++++++++++++++-----
> 1 file changed, 14 insertions(+), 5 deletions(-)
> 
> diff --git a/sysdeps/x86_64/dl-machine.h b/sysdeps/x86_64/dl-machine.h
> index 103eee6c3f..9a876a371e 100644
> --- a/sysdeps/x86_64/dl-machine.h
> +++ b/sysdeps/x86_64/dl-machine.h
> @@ -570,12 +570,21 @@ elf_machine_lazy_rel (struct link_map *map,
>     }
>   else if (__glibc_likely (r_type == R_X86_64_TLSDESC))
>     {
> -      struct tlsdesc volatile * __attribute__((__unused__)) td =
> -	(struct tlsdesc volatile *)reloc_addr;
> +      const Elf_Symndx symndx = ELFW (R_SYM) (reloc->r_info);
> +      const ElfW (Sym) *symtab = (const void *)D_PTR (map, l_info[DT_SYMTAB]);
> +      const ElfW (Sym) *sym = &symtab[symndx];
> +      const struct r_found_version *version = NULL;
> 
> -      td->arg = (void*)reloc;
> -      td->entry = (void*)(D_PTR (map, l_info[ADDRIDX (DT_TLSDESC_PLT)])
> -			  + map->l_addr);
> +      if (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL)
> +	{
> +	  const ElfW (Half) *vernum =
> +	    (const void *)D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]);
> +	  version = &map->l_versions[vernum[symndx] & 0x7fff];
> +	}
> +
> +      /* Always initialize TLS descriptors completely at load time, in
> +	 case static TLS is allocated for it that requires locking.  */
> +      elf_machine_rela (map, reloc, sym, version, reloc_addr, skip_ifunc);
>     }
>   else if (__glibc_unlikely (r_type == R_X86_64_IRELATIVE))
>     {
> -- 
> 2.17.1
>
  
Szabolcs Nagy April 9, 2021, 1:38 p.m. UTC | #2
The 04/08/2021 17:14, Ben Woodard wrote:
> Don’t you also need to modify elf_machine_runtime_setup It also has a reference to _dl_tlsdesc_resolve_rela that becomes undefined when you try to compile with your patchset including patch 13 where you remove the code.
> 
> To make a test build I just commented it out but I think that this patch should remove that if statement as well.

thanks,
indeed this was wrong, i tested the wrong branch on x86_64.

i will fix this and post a v2 set with the other feedback.

> 
> diff --git a/sysdeps/x86_64/dl-machine.h b/sysdeps/x86_64/dl-machine.h
> index 9a876a371e..2b1b36a739 100644
> --- a/sysdeps/x86_64/dl-machine.h
> +++ b/sysdeps/x86_64/dl-machine.h
> @@ -127,9 +127,11 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
>         }
>      }
> 
> -  if (l->l_info[ADDRIDX (DT_TLSDESC_GOT)] && lazy)
> -    *(ElfW(Addr)*)(D_PTR (l, l_info[ADDRIDX (DT_TLSDESC_GOT)]) + l->l_addr)
> -      = (ElfW(Addr)) &_dl_tlsdesc_resolve_rela;
> +  /* Lazy binding of TLSDESC relocations is no longer done so this logic
> +     won't apply */
> +  /* if (l->l_info[ADDRIDX (DT_TLSDESC_GOT)] && lazy) */
> +  /*   *(ElfW(Addr)*)(D_PTR (l, l_info[ADDRIDX (DT_TLSDESC_GOT)]) + l->l_addr) */
> +  /*     = (ElfW(Addr)) &_dl_tlsdesc_resolve_rela; */
> 
>    return lazy;
>  }
> 
> 
> > On Feb 15, 2021, at 4:02 AM, Szabolcs Nagy via Libc-alpha <libc-alpha@sourceware.org> wrote:
> > 
> > Lazy tlsdesc relocation is racy because the static tls optimization and
> > tlsdesc management operations are done without holding the dlopen lock.
> > 
> > This similar to the commit b7cf203b5c17dd6d9878537d41e0c7cc3d270a67
> > for aarch64, but it fixes a different race: bug 27137.
> > ---
> > sysdeps/x86_64/dl-machine.h | 19 ++++++++++++++-----
> > 1 file changed, 14 insertions(+), 5 deletions(-)
> > 
> > diff --git a/sysdeps/x86_64/dl-machine.h b/sysdeps/x86_64/dl-machine.h
> > index 103eee6c3f..9a876a371e 100644
> > --- a/sysdeps/x86_64/dl-machine.h
> > +++ b/sysdeps/x86_64/dl-machine.h
> > @@ -570,12 +570,21 @@ elf_machine_lazy_rel (struct link_map *map,
> >     }
> >   else if (__glibc_likely (r_type == R_X86_64_TLSDESC))
> >     {
> > -      struct tlsdesc volatile * __attribute__((__unused__)) td =
> > -	(struct tlsdesc volatile *)reloc_addr;
> > +      const Elf_Symndx symndx = ELFW (R_SYM) (reloc->r_info);
> > +      const ElfW (Sym) *symtab = (const void *)D_PTR (map, l_info[DT_SYMTAB]);
> > +      const ElfW (Sym) *sym = &symtab[symndx];
> > +      const struct r_found_version *version = NULL;
> > 
> > -      td->arg = (void*)reloc;
> > -      td->entry = (void*)(D_PTR (map, l_info[ADDRIDX (DT_TLSDESC_PLT)])
> > -			  + map->l_addr);
> > +      if (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL)
> > +	{
> > +	  const ElfW (Half) *vernum =
> > +	    (const void *)D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]);
> > +	  version = &map->l_versions[vernum[symndx] & 0x7fff];
> > +	}
> > +
> > +      /* Always initialize TLS descriptors completely at load time, in
> > +	 case static TLS is allocated for it that requires locking.  */
> > +      elf_machine_rela (map, reloc, sym, version, reloc_addr, skip_ifunc);
> >     }
> >   else if (__glibc_unlikely (r_type == R_X86_64_IRELATIVE))
> >     {
> > -- 
> > 2.17.1
> > 
> 

--
  
Ben Woodard April 9, 2021, 2:55 p.m. UTC | #3
> On Apr 9, 2021, at 6:38 AM, Szabolcs Nagy <szabolcs.nagy@arm.com> wrote:
> 
> The 04/08/2021 17:14, Ben Woodard wrote:
>> Don’t you also need to modify elf_machine_runtime_setup It also has a reference to _dl_tlsdesc_resolve_rela that becomes undefined when you try to compile with your patchset including patch 13 where you remove the code.
>> 
>> To make a test build I just commented it out but I think that this patch should remove that if statement as well.
> 
> thanks,
> indeed this was wrong, i tested the wrong branch on x86_64.
> 
> i will fix this and post a v2 set with the other feedback.

On the positive side, I’ve been tracking down a problem where a library compiled with the gnu2 variant of TLS (in a way that I haven’t been able to reproduce yet) crashes the dynamic loader when used with a performance tool that uses LD_AUDIT.

This patch alone (with my tiny modification below) addresses the problem. I say “addresses” because it doesn’t exactly fix the problem; it just means the buggy code is never run. Patch 13 in your patch set removes that code.

I see that patches 1 and 2 of your patch set have already been committed. I would encourage you to consider committing v2 of patches 11 and 13 (or maybe 11-14) even before the rest of the patch set, since they address a bug that we are seeing in the wild.

-ben

> 
>> 
>> diff --git a/sysdeps/x86_64/dl-machine.h b/sysdeps/x86_64/dl-machine.h
>> index 9a876a371e..2b1b36a739 100644
>> --- a/sysdeps/x86_64/dl-machine.h
>> +++ b/sysdeps/x86_64/dl-machine.h
>> @@ -127,9 +127,11 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
>>        }
>>     }
>> 
>> -  if (l->l_info[ADDRIDX (DT_TLSDESC_GOT)] && lazy)
>> -    *(ElfW(Addr)*)(D_PTR (l, l_info[ADDRIDX (DT_TLSDESC_GOT)]) + l->l_addr)
>> -      = (ElfW(Addr)) &_dl_tlsdesc_resolve_rela;
>> +  /* Lazy binding of TLSDESC relocations is no longer done so this logic
>> +     won't apply */
>> +  /* if (l->l_info[ADDRIDX (DT_TLSDESC_GOT)] && lazy) */
>> +  /*   *(ElfW(Addr)*)(D_PTR (l, l_info[ADDRIDX (DT_TLSDESC_GOT)]) + l->l_addr) */
>> +  /*     = (ElfW(Addr)) &_dl_tlsdesc_resolve_rela; */
>> 
>>   return lazy;
>> }
>> 
>> 
>>> On Feb 15, 2021, at 4:02 AM, Szabolcs Nagy via Libc-alpha <libc-alpha@sourceware.org> wrote:
>>> 
>>> Lazy tlsdesc relocation is racy because the static tls optimization and
>>> tlsdesc management operations are done without holding the dlopen lock.
>>> 
>>> This similar to the commit b7cf203b5c17dd6d9878537d41e0c7cc3d270a67
>>> for aarch64, but it fixes a different race: bug 27137.
>>> ---
>>> sysdeps/x86_64/dl-machine.h | 19 ++++++++++++++-----
>>> 1 file changed, 14 insertions(+), 5 deletions(-)
>>> 
>>> diff --git a/sysdeps/x86_64/dl-machine.h b/sysdeps/x86_64/dl-machine.h
>>> index 103eee6c3f..9a876a371e 100644
>>> --- a/sysdeps/x86_64/dl-machine.h
>>> +++ b/sysdeps/x86_64/dl-machine.h
>>> @@ -570,12 +570,21 @@ elf_machine_lazy_rel (struct link_map *map,
>>>    }
>>>  else if (__glibc_likely (r_type == R_X86_64_TLSDESC))
>>>    {
>>> -      struct tlsdesc volatile * __attribute__((__unused__)) td =
>>> -	(struct tlsdesc volatile *)reloc_addr;
>>> +      const Elf_Symndx symndx = ELFW (R_SYM) (reloc->r_info);
>>> +      const ElfW (Sym) *symtab = (const void *)D_PTR (map, l_info[DT_SYMTAB]);
>>> +      const ElfW (Sym) *sym = &symtab[symndx];
>>> +      const struct r_found_version *version = NULL;
>>> 
>>> -      td->arg = (void*)reloc;
>>> -      td->entry = (void*)(D_PTR (map, l_info[ADDRIDX (DT_TLSDESC_PLT)])
>>> -			  + map->l_addr);
>>> +      if (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL)
>>> +	{
>>> +	  const ElfW (Half) *vernum =
>>> +	    (const void *)D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]);
>>> +	  version = &map->l_versions[vernum[symndx] & 0x7fff];
>>> +	}
>>> +
>>> +      /* Always initialize TLS descriptors completely at load time, in
>>> +	 case static TLS is allocated for it that requires locking.  */
>>> +      elf_machine_rela (map, reloc, sym, version, reloc_addr, skip_ifunc);
>>>    }
>>>  else if (__glibc_unlikely (r_type == R_X86_64_IRELATIVE))
>>>    {
>>> -- 
>>> 2.17.1
>>> 
>> 
> 
> --
  

Patch

diff --git a/sysdeps/x86_64/dl-machine.h b/sysdeps/x86_64/dl-machine.h
index 103eee6c3f..9a876a371e 100644
--- a/sysdeps/x86_64/dl-machine.h
+++ b/sysdeps/x86_64/dl-machine.h
@@ -570,12 +570,21 @@  elf_machine_lazy_rel (struct link_map *map,
     }
   else if (__glibc_likely (r_type == R_X86_64_TLSDESC))
     {
-      struct tlsdesc volatile * __attribute__((__unused__)) td =
-	(struct tlsdesc volatile *)reloc_addr;
+      const Elf_Symndx symndx = ELFW (R_SYM) (reloc->r_info);
+      const ElfW (Sym) *symtab = (const void *)D_PTR (map, l_info[DT_SYMTAB]);
+      const ElfW (Sym) *sym = &symtab[symndx];
+      const struct r_found_version *version = NULL;
 
-      td->arg = (void*)reloc;
-      td->entry = (void*)(D_PTR (map, l_info[ADDRIDX (DT_TLSDESC_PLT)])
-			  + map->l_addr);
+      if (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL)
+	{
+	  const ElfW (Half) *vernum =
+	    (const void *)D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]);
+	  version = &map->l_versions[vernum[symndx] & 0x7fff];
+	}
+
+      /* Always initialize TLS descriptors completely at load time, in
+	 case static TLS is allocated for it that requires locking.  */
+      elf_machine_rela (map, reloc, sym, version, reloc_addr, skip_ifunc);
     }
   else if (__glibc_unlikely (r_type == R_X86_64_IRELATIVE))
     {
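
For context (not part of the patch itself), the descriptor that the removed lazy path was writing has roughly the following shape on x86_64; this is a sketch after sysdeps/x86_64/dl-tlsdesc.h, so treat the field details as an approximation rather than a verbatim copy:

#include <stddef.h>   /* ptrdiff_t */

/* Approximate shape of the x86_64 TLS descriptor manipulated above.
   The removed lazy path pointed 'entry' at the DT_TLSDESC_PLT
   trampoline and stashed the relocation in 'arg', deferring symbol
   resolution to the first access through the descriptor.  With this
   patch, elf_machine_rela installs the final resolver and argument
   while relocations are processed at load time instead.  */
struct tlsdesc
{
  ptrdiff_t (*entry) (struct tlsdesc *on_rax);
  void *arg;
};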