[v4,08/14] elf: Remove run-time-writable fields from struct link_map
Checks
| Context |
Check |
Description |
| redhat-pt-bot/TryBot-apply_patch |
success
|
Patch applied to master at the time it was sent
|
| linaro-tcwg-bot/tcwg_glibc_build--master-aarch64 |
success
|
Build passed
|
| linaro-tcwg-bot/tcwg_glibc_check--master-aarch64 |
success
|
Test passed
|
| linaro-tcwg-bot/tcwg_glibc_build--master-arm |
success
|
Build passed
|
| linaro-tcwg-bot/tcwg_glibc_check--master-arm |
success
|
Test passed
|
Commit Message
And introduce struct link_map_rw.
These fields are written during run-time relocation (for lazy binding)
or during dlopen, so they are difficult to handle efficiently with
otherwise read-only link maps. Moving them into a separate allocation
makes it possible to keep the read-write while the rest of the link
map is read-only.
Global-dynamic TLS is lazily initialized and may therefore write
to the l_tls_offset field. The code does not acquire the main
loader lock, so this field has to be moved to the read-write space.
It can be moved back to the main link map once global-dynamic
TLS no longer uses lazy initialization.
Auditors can write to the cookie member, so it has to remain
read-write even if other parts of the link map are write-protected.
Allocation of the l_rw part of the rtld link map is changed so that
the auditor states come immediately after it, just as for other
link maps.
The dynamic linker re-runs dependency sorting during process shutdown
in _dl_fini, instead of simply using the reverse initialization order.
(This is required for compatibility with existing applications.)
This means that the l_idx and l_visited fields are written to. There
is no way to report errors during shutdown. If these fields are
always writable, this avoids the need to make link maps writable
during _dl_fini, avoiding the error reporting issue.
---
elf/circleload1.c | 3 +-
elf/dl-call_fini.c | 2 +-
elf/dl-close.c | 106 +++++++++++----------
elf/dl-deps.c | 14 +--
elf/dl-find_object.c | 2 +-
elf/dl-fini.c | 8 +-
elf/dl-init.c | 4 +-
elf/dl-lookup.c | 42 ++++-----
elf/dl-object.c | 17 ++--
elf/dl-open.c | 29 +++---
elf/dl-reloc.c | 15 +--
elf/dl-sort-maps.c | 26 +++---
elf/dl-static-tls.h | 8 +-
elf/dl-support.c | 2 +-
elf/dl-tls.c | 37 ++++----
elf/get-dynamic-info.h | 2 +-
elf/loadtest.c | 4 +-
elf/neededtest.c | 3 +-
elf/neededtest2.c | 3 +-
elf/neededtest3.c | 3 +-
elf/neededtest4.c | 3 +-
elf/rtld.c | 19 ++--
elf/tst-tls_tp_offset.c | 3 +-
elf/unload.c | 2 +-
elf/unload2.c | 2 +-
htl/pt-alloc.c | 5 +-
include/link.h | 123 +++++++++++++++----------
nptl/Versions | 3 +-
nptl_db/db_info.c | 1 +
nptl_db/structs.def | 3 +-
nptl_db/td_thr_tlsbase.c | 12 ++-
stdlib/cxa_thread_atexit_impl.c | 4 +-
sysdeps/aarch64/dl-machine.h | 5 +-
sysdeps/alpha/dl-machine.h | 4 +-
sysdeps/arc/dl-machine.h | 3 +-
sysdeps/arm/dl-machine.h | 4 +-
sysdeps/csky/dl-machine.h | 2 +-
sysdeps/generic/ldsodefs.h | 12 +--
sysdeps/hppa/dl-machine.h | 3 +-
sysdeps/i386/dl-machine.h | 11 ++-
sysdeps/loongarch/dl-tls.h | 2 +-
sysdeps/m68k/dl-tls.h | 2 +-
sysdeps/microblaze/dl-machine.h | 3 +-
sysdeps/mips/dl-tls.h | 2 +-
sysdeps/or1k/dl-machine.h | 4 +-
sysdeps/powerpc/dl-tls.h | 2 +-
sysdeps/powerpc/powerpc32/dl-machine.h | 4 +-
sysdeps/powerpc/powerpc64/dl-machine.h | 4 +-
sysdeps/riscv/dl-tls.h | 2 +-
sysdeps/s390/s390-32/dl-machine.h | 5 +-
sysdeps/s390/s390-64/dl-machine.h | 5 +-
sysdeps/sh/dl-machine.h | 7 +-
sysdeps/sparc/sparc32/dl-machine.h | 4 +-
sysdeps/sparc/sparc64/dl-machine.h | 4 +-
sysdeps/x86/dl-prop.h | 2 +-
sysdeps/x86_64/dl-machine.h | 5 +-
56 files changed, 337 insertions(+), 274 deletions(-)
Comments
On 02/02/25 18:13, Florian Weimer wrote:
> And introduce struct link_map_rw.
>
> These fields are written during run-time relocation (for lazy binding)
> or during dlopen, so they are difficult to handle efficiently with
> otherwise read-only link maps. Moving them into a separate allocation
> makes it possible to keep the read-write while the rest of the link
> map is read-only.
>
> Global-dynamic TLS is lazily initialized and may therefore write
> to the l_tls_offset field. The code does not acquire the main
> loader lock, so this field has to be moved to the read-write space.
> It can be moved back to the main link map once global-dynamic
> TLS no longer uses lazy initialization.
>
> Auditors can write to the cookie member, so it has to remain
> read-write even if other parts of the link map are write-protected.
> Allocation of the l_rw part of the rtld link map is changed so that
> the auditor states come immediately after it, just as for other
> link maps.
>
> The dynamic linker re-runs dependency sorting during process shutdown
> in _dl_fini, instead of simply using the reverse initialization order.
> (This is required for compatibility with existing applications.)
> This means that the l_idx and l_visited fields are written to. There
> is no way to report errors during shutdown. If these fields are
> always writable, this avoids the need to make link maps writable
> during _dl_fini, avoiding the error reporting issue.
Mostly mechanical changes, and it seems OK, but I found an x86_64 build
failure with --enable-cet.
> ---
> elf/circleload1.c | 3 +-
> elf/dl-call_fini.c | 2 +-
> elf/dl-close.c | 106 +++++++++++----------
> elf/dl-deps.c | 14 +--
> elf/dl-find_object.c | 2 +-
> elf/dl-fini.c | 8 +-
> elf/dl-init.c | 4 +-
> elf/dl-lookup.c | 42 ++++-----
> elf/dl-object.c | 17 ++--
> elf/dl-open.c | 29 +++---
> elf/dl-reloc.c | 15 +--
> elf/dl-sort-maps.c | 26 +++---
> elf/dl-static-tls.h | 8 +-
> elf/dl-support.c | 2 +-
> elf/dl-tls.c | 37 ++++----
> elf/get-dynamic-info.h | 2 +-
> elf/loadtest.c | 4 +-
> elf/neededtest.c | 3 +-
> elf/neededtest2.c | 3 +-
> elf/neededtest3.c | 3 +-
> elf/neededtest4.c | 3 +-
> elf/rtld.c | 19 ++--
> elf/tst-tls_tp_offset.c | 3 +-
> elf/unload.c | 2 +-
> elf/unload2.c | 2 +-
> htl/pt-alloc.c | 5 +-
> include/link.h | 123 +++++++++++++++----------
> nptl/Versions | 3 +-
> nptl_db/db_info.c | 1 +
> nptl_db/structs.def | 3 +-
> nptl_db/td_thr_tlsbase.c | 12 ++-
> stdlib/cxa_thread_atexit_impl.c | 4 +-
> sysdeps/aarch64/dl-machine.h | 5 +-
> sysdeps/alpha/dl-machine.h | 4 +-
> sysdeps/arc/dl-machine.h | 3 +-
> sysdeps/arm/dl-machine.h | 4 +-
> sysdeps/csky/dl-machine.h | 2 +-
> sysdeps/generic/ldsodefs.h | 12 +--
> sysdeps/hppa/dl-machine.h | 3 +-
> sysdeps/i386/dl-machine.h | 11 ++-
> sysdeps/loongarch/dl-tls.h | 2 +-
> sysdeps/m68k/dl-tls.h | 2 +-
> sysdeps/microblaze/dl-machine.h | 3 +-
> sysdeps/mips/dl-tls.h | 2 +-
> sysdeps/or1k/dl-machine.h | 4 +-
> sysdeps/powerpc/dl-tls.h | 2 +-
> sysdeps/powerpc/powerpc32/dl-machine.h | 4 +-
> sysdeps/powerpc/powerpc64/dl-machine.h | 4 +-
> sysdeps/riscv/dl-tls.h | 2 +-
> sysdeps/s390/s390-32/dl-machine.h | 5 +-
> sysdeps/s390/s390-64/dl-machine.h | 5 +-
> sysdeps/sh/dl-machine.h | 7 +-
> sysdeps/sparc/sparc32/dl-machine.h | 4 +-
> sysdeps/sparc/sparc64/dl-machine.h | 4 +-
> sysdeps/x86/dl-prop.h | 2 +-
> sysdeps/x86_64/dl-machine.h | 5 +-
> 56 files changed, 337 insertions(+), 274 deletions(-)
>
> diff --git a/elf/circleload1.c b/elf/circleload1.c
> index 990ff84a84..eeaeb3b8d7 100644
> --- a/elf/circleload1.c
> +++ b/elf/circleload1.c
> @@ -29,7 +29,8 @@ check_loaded_objects (const char **loaded)
> for (lm = MAPS; lm; lm = lm->l_next)
> {
> if (lm->l_name && lm->l_name[0])
> - printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
> + printf(" %s, count = %d\n", lm->l_name,
> + (int) lm->l_rw->l_direct_opencount);
> if (lm->l_type == lt_loaded && lm->l_name)
> {
> int match = 0;
> diff --git a/elf/dl-call_fini.c b/elf/dl-call_fini.c
> index 950744cb3d..8ee2724453 100644
> --- a/elf/dl-call_fini.c
> +++ b/elf/dl-call_fini.c
> @@ -29,7 +29,7 @@ _dl_call_fini (void *closure_map)
> _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n", map->l_name, map->l_ns);
>
> /* Make sure nothing happens if we are called twice. */
> - map->l_init_called = 0;
> + map->l_rw->l_init_called = 0;
>
> ElfW(Dyn) *fini_array = map->l_info[DT_FINI_ARRAY];
> if (fini_array != NULL)
> diff --git a/elf/dl-close.c b/elf/dl-close.c
> index 47bd3dab81..3169ad03bd 100644
> --- a/elf/dl-close.c
> +++ b/elf/dl-close.c
> @@ -109,23 +109,23 @@ void
> _dl_close_worker (struct link_map *map, bool force)
> {
> /* One less direct use. */
> - --map->l_direct_opencount;
> + --map->l_rw->l_direct_opencount;
>
> /* If _dl_close is called recursively (some destructor call dlclose),
> just record that the parent _dl_close will need to do garbage collection
> again and return. */
> static enum { not_pending, pending, rerun } dl_close_state;
>
> - if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
> + if (map->l_rw->l_direct_opencount > 0 || map->l_type != lt_loaded
> || dl_close_state != not_pending)
> {
> - if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
> + if (map->l_rw->l_direct_opencount == 0 && map->l_type == lt_loaded)
> dl_close_state = rerun;
>
> /* There are still references to this object. Do nothing more. */
> if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
> _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
> - map->l_name, map->l_direct_opencount);
> + map->l_name, map->l_rw->l_direct_opencount);
>
> return;
> }
> @@ -147,7 +147,7 @@ _dl_close_worker (struct link_map *map, bool force)
> {
> l->l_map_used = 0;
> l->l_map_done = 0;
> - l->l_idx = idx;
> + l->l_rw->l_idx = idx;
> maps[idx] = l;
> ++idx;
> }
> @@ -157,10 +157,10 @@ _dl_close_worker (struct link_map *map, bool force)
> The map variable is NULL after a retry. */
> if (map != NULL)
> {
> - maps[map->l_idx] = maps[0];
> - maps[map->l_idx]->l_idx = map->l_idx;
> + maps[map->l_rw->l_idx] = maps[0];
> + maps[map->l_rw->l_idx]->l_rw->l_idx = map->l_rw->l_idx;
> maps[0] = map;
> - maps[0]->l_idx = 0;
> + maps[0]->l_rw->l_idx = 0;
> }
>
> /* Keep track of the lowest index link map we have covered already. */
> @@ -175,11 +175,11 @@ _dl_close_worker (struct link_map *map, bool force)
>
> /* Check whether this object is still used. */
> if (l->l_type == lt_loaded
> - && l->l_direct_opencount == 0
> - && !l->l_nodelete_active
> + && l->l_rw->l_direct_opencount == 0
> + && !l->l_rw->l_nodelete_active
> /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
> acquire is sufficient and correct. */
> - && atomic_load_acquire (&l->l_tls_dtor_count) == 0
> + && atomic_load_acquire (&l->l_rw->l_tls_dtor_count) == 0
> && !l->l_map_used)
> continue;
>
> @@ -187,7 +187,7 @@ _dl_close_worker (struct link_map *map, bool force)
> l->l_map_used = 1;
> l->l_map_done = 1;
> /* Signal the object is still needed. */
> - l->l_idx = IDX_STILL_USED;
> + l->l_rw->l_idx = IDX_STILL_USED;
>
> /* Mark all dependencies as used. */
> if (l->l_initfini != NULL)
> @@ -197,9 +197,10 @@ _dl_close_worker (struct link_map *map, bool force)
> struct link_map **lp = &l->l_initfini[1];
> while (*lp != NULL)
> {
> - if ((*lp)->l_idx != IDX_STILL_USED)
> + if ((*lp)->l_rw->l_idx != IDX_STILL_USED)
> {
> - assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);
> + assert ((*lp)->l_rw->l_idx >= 0
> + && (*lp)->l_rw->l_idx < nloaded);
>
> if (!(*lp)->l_map_used)
> {
> @@ -208,8 +209,8 @@ _dl_close_worker (struct link_map *map, bool force)
> already processed it, then we need to go back
> and process again from that point forward to
> ensure we keep all of its dependencies also. */
> - if ((*lp)->l_idx - 1 < done_index)
> - done_index = (*lp)->l_idx - 1;
> + if ((*lp)->l_rw->l_idx - 1 < done_index)
> + done_index = (*lp)->l_rw->l_idx - 1;
> }
> }
>
> @@ -217,20 +218,20 @@ _dl_close_worker (struct link_map *map, bool force)
> }
> }
> /* And the same for relocation dependencies. */
> - if (l->l_reldeps != NULL)
> - for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
> + if (l->l_rw->l_reldeps != NULL)
> + for (unsigned int j = 0; j < l->l_rw->l_reldeps->act; ++j)
> {
> - struct link_map *jmap = l->l_reldeps->list[j];
> + struct link_map *jmap = l->l_rw->l_reldeps->list[j];
>
> - if (jmap->l_idx != IDX_STILL_USED)
> + if (jmap->l_rw->l_idx != IDX_STILL_USED)
> {
> - assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);
> + assert (jmap->l_rw->l_idx >= 0 && jmap->l_rw->l_idx < nloaded);
>
> if (!jmap->l_map_used)
> {
> jmap->l_map_used = 1;
> - if (jmap->l_idx - 1 < done_index)
> - done_index = jmap->l_idx - 1;
> + if (jmap->l_rw->l_idx - 1 < done_index)
> + done_index = jmap->l_rw->l_idx - 1;
> }
> }
> }
> @@ -255,12 +256,12 @@ _dl_close_worker (struct link_map *map, bool force)
>
> if (!imap->l_map_used)
> {
> - assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);
> + assert (imap->l_type == lt_loaded && !imap->l_rw->l_nodelete_active);
>
> /* Call its termination function. Do not do it for
> half-cooked objects. Temporarily disable exception
> handling, so that errors are fatal. */
> - if (imap->l_init_called)
> + if (imap->l_rw->l_init_called)
> _dl_catch_exception (NULL, _dl_call_fini, imap);
>
> #ifdef SHARED
> @@ -327,7 +328,7 @@ _dl_close_worker (struct link_map *map, bool force)
> ((char *) imap->l_scope[cnt]
> - offsetof (struct link_map, l_searchlist));
> assert (tmap->l_ns == nsid);
> - if (tmap->l_idx == IDX_STILL_USED)
> + if (tmap->l_rw->l_idx == IDX_STILL_USED)
> ++remain;
> else
> removed_any = true;
> @@ -372,7 +373,7 @@ _dl_close_worker (struct link_map *map, bool force)
> struct link_map *tmap = (struct link_map *)
> ((char *) imap->l_scope[cnt]
> - offsetof (struct link_map, l_searchlist));
> - if (tmap->l_idx != IDX_STILL_USED)
> + if (tmap->l_rw->l_idx != IDX_STILL_USED)
> {
> /* Remove the scope. Or replace with own map's
> scope. */
> @@ -417,7 +418,7 @@ _dl_close_worker (struct link_map *map, bool force)
> /* The loader is gone, so mark the object as not having one.
> Note: l_idx != IDX_STILL_USED -> object will be removed. */
> if (imap->l_loader != NULL
> - && imap->l_loader->l_idx != IDX_STILL_USED)
> + && imap->l_loader->l_rw->l_idx != IDX_STILL_USED)
> imap->l_loader = NULL;
>
> /* Remember where the first dynamically loaded object is. */
> @@ -507,14 +508,14 @@ _dl_close_worker (struct link_map *map, bool force)
> if (GL(dl_tls_dtv_slotinfo_list) != NULL
> && ! remove_slotinfo (imap->l_tls_modid,
> GL(dl_tls_dtv_slotinfo_list), 0,
> - imap->l_init_called))
> + imap->l_rw->l_init_called))
> /* All dynamically loaded modules with TLS are unloaded. */
> /* Can be read concurrently. */
> atomic_store_relaxed (&GL(dl_tls_max_dtv_idx),
> GL(dl_tls_static_nelem));
>
> - if (imap->l_tls_offset != NO_TLS_OFFSET
> - && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
> + if (imap->l_rw->l_tls_offset != NO_TLS_OFFSET
> + && imap->l_rw->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
> {
> /* Collect a contiguous chunk built from the objects in
> this search list, going in either direction. When the
> @@ -522,19 +523,19 @@ _dl_close_worker (struct link_map *map, bool force)
> reclaim it. */
> #if TLS_TCB_AT_TP
> if (tls_free_start == NO_TLS_OFFSET
> - || (size_t) imap->l_tls_offset == tls_free_start)
> + || (size_t) imap->l_rw->l_tls_offset == tls_free_start)
> {
> /* Extend the contiguous chunk being reclaimed. */
> tls_free_start
> - = imap->l_tls_offset - imap->l_tls_blocksize;
> + = imap->l_rw->l_tls_offset - imap->l_tls_blocksize;
>
> if (tls_free_end == NO_TLS_OFFSET)
> - tls_free_end = imap->l_tls_offset;
> + tls_free_end = imap->l_rw->l_tls_offset;
> }
> - else if (imap->l_tls_offset - imap->l_tls_blocksize
> + else if (imap->l_rw->l_tls_offset - imap->l_tls_blocksize
> == tls_free_end)
> /* Extend the chunk backwards. */
> - tls_free_end = imap->l_tls_offset;
> + tls_free_end = imap->l_rw->l_tls_offset;
> else
> {
> /* This isn't contiguous with the last chunk freed.
> @@ -543,19 +544,20 @@ _dl_close_worker (struct link_map *map, bool force)
> if (tls_free_end == GL(dl_tls_static_used))
> {
> GL(dl_tls_static_used) = tls_free_start;
> - tls_free_end = imap->l_tls_offset;
> + tls_free_end = imap->l_rw->l_tls_offset;
> tls_free_start
> = tls_free_end - imap->l_tls_blocksize;
> }
> - else if ((size_t) imap->l_tls_offset
> + else if ((size_t) imap->l_rw->l_tls_offset
> == GL(dl_tls_static_used))
> GL(dl_tls_static_used)
> - = imap->l_tls_offset - imap->l_tls_blocksize;
> - else if (tls_free_end < (size_t) imap->l_tls_offset)
> + = imap->l_rw->l_tls_offset - imap->l_tls_blocksize;
> + else if (tls_free_end
> + < (size_t) imap->l_rw->l_tls_offset)
> {
> /* We pick the later block. It has a chance to
> be freed. */
> - tls_free_end = imap->l_tls_offset;
> + tls_free_end = imap->l_rw->l_tls_offset;
> tls_free_start
> = tls_free_end - imap->l_tls_blocksize;
> }
> @@ -564,34 +566,37 @@ _dl_close_worker (struct link_map *map, bool force)
> if (tls_free_start == NO_TLS_OFFSET)
> {
> tls_free_start = imap->l_tls_firstbyte_offset;
> - tls_free_end = (imap->l_tls_offset
> + tls_free_end = (imap->l_rw->l_tls_offset
> + imap->l_tls_blocksize);
> }
> else if (imap->l_tls_firstbyte_offset == tls_free_end)
> /* Extend the contiguous chunk being reclaimed. */
> - tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
> - else if (imap->l_tls_offset + imap->l_tls_blocksize
> + tls_free_end = (imap->l_rw->l_tls_offset
> + + imap->l_tls_blocksize);
> + else if (imap->l_rw->l_tls_offset + imap->l_tls_blocksize
> == tls_free_start)
> /* Extend the chunk backwards. */
> tls_free_start = imap->l_tls_firstbyte_offset;
> /* This isn't contiguous with the last chunk freed.
> One of them will be leaked unless we can free
> one block right away. */
> - else if (imap->l_tls_offset + imap->l_tls_blocksize
> + else if (imap->l_rw->l_tls_offset + imap->l_tls_blocksize
> == GL(dl_tls_static_used))
> GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
> else if (tls_free_end == GL(dl_tls_static_used))
> {
> GL(dl_tls_static_used) = tls_free_start;
> tls_free_start = imap->l_tls_firstbyte_offset;
> - tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
> + tls_free_end = (imap->l_rw->l_tls_offset
> + + imap->l_tls_blocksize);
> }
> else if (tls_free_end < imap->l_tls_firstbyte_offset)
> {
> /* We pick the later block. It has a chance to
> be freed. */
> tls_free_start = imap->l_tls_firstbyte_offset;
> - tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
> + tls_free_end = (imap->l_rw->l_tls_offset
> + + imap->l_tls_blocksize);
> }
> #else
> # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
> @@ -663,7 +668,8 @@ _dl_close_worker (struct link_map *map, bool force)
> if (imap->l_origin != (char *) -1)
> free ((char *) imap->l_origin);
>
> - free (imap->l_reldeps);
> + free (imap->l_rw->l_reldeps);
> + free (imap->l_rw);
>
> /* Print debugging message. */
> if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
> @@ -769,7 +775,7 @@ _dl_close (void *_map)
> before we took the lock. There is no way to detect this (see below)
> so we proceed assuming this isn't the case. First see whether we
> can remove the object at all. */
> - if (__glibc_unlikely (map->l_nodelete_active))
> + if (__glibc_unlikely (map->l_rw->l_nodelete_active))
> {
> /* Nope. Do nothing. */
> __rtld_lock_unlock_recursive (GL(dl_load_lock));
> @@ -786,7 +792,7 @@ _dl_close (void *_map)
> should be a detectable case and given that dlclose should be threadsafe
> we need this to be a reliable detection.
> This is bug 20990. */
> - if (__builtin_expect (map->l_direct_opencount, 1) == 0)
> + if (__builtin_expect (map->l_rw->l_direct_opencount, 1) == 0)
> {
> __rtld_lock_unlock_recursive (GL(dl_load_lock));
> _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
> diff --git a/elf/dl-deps.c b/elf/dl-deps.c
> index 3c8a5ebced..3235b1d462 100644
> --- a/elf/dl-deps.c
> +++ b/elf/dl-deps.c
> @@ -478,20 +478,20 @@ _dl_map_object_deps (struct link_map *map,
>
> /* Maybe we can remove some relocation dependencies now. */
> struct link_map_reldeps *l_reldeps = NULL;
> - if (map->l_reldeps != NULL)
> + if (map->l_rw->l_reldeps != NULL)
> {
> for (i = 0; i < nlist; ++i)
> map->l_searchlist.r_list[i]->l_reserved = 1;
>
> /* Avoid removing relocation dependencies of the main binary. */
> map->l_reserved = 0;
> - struct link_map **list = &map->l_reldeps->list[0];
> - for (i = 0; i < map->l_reldeps->act; ++i)
> + struct link_map **list = &map->l_rw->l_reldeps->list[0];
> + for (i = 0; i < map->l_rw->l_reldeps->act; ++i)
> if (list[i]->l_reserved)
> {
> /* Need to allocate new array of relocation dependencies. */
> l_reldeps = malloc (sizeof (*l_reldeps)
> - + map->l_reldepsmax
> + + map->l_rw->l_reldepsmax
> * sizeof (struct link_map *));
> if (l_reldeps == NULL)
> /* Bad luck, keep the reldeps duplicated between
> @@ -502,7 +502,7 @@ _dl_map_object_deps (struct link_map *map,
> unsigned int j = i;
> memcpy (&l_reldeps->list[0], &list[0],
> i * sizeof (struct link_map *));
> - for (i = i + 1; i < map->l_reldeps->act; ++i)
> + for (i = i + 1; i < map->l_rw->l_reldeps->act; ++i)
> if (!list[i]->l_reserved)
> l_reldeps->list[j++] = list[i];
> l_reldeps->act = j;
> @@ -547,8 +547,8 @@ _dl_map_object_deps (struct link_map *map,
> if (l_reldeps != NULL)
> {
> atomic_write_barrier ();
> - void *old_l_reldeps = map->l_reldeps;
> - map->l_reldeps = l_reldeps;
> + void *old_l_reldeps = map->l_rw->l_reldeps;
> + map->l_rw->l_reldeps = l_reldeps;
> _dl_scope_free (old_l_reldeps);
> }
> if (old_l_initfini != NULL)
> diff --git a/elf/dl-find_object.c b/elf/dl-find_object.c
> index 1e76373292..d8d09ffe0b 100644
> --- a/elf/dl-find_object.c
> +++ b/elf/dl-find_object.c
> @@ -508,7 +508,7 @@ _dlfo_process_initial (void)
> if (l != main_map && l == l->l_real)
> {
> /* lt_library link maps are implicitly NODELETE. */
> - if (l->l_type == lt_library || l->l_nodelete_active)
> + if (l->l_type == lt_library || l->l_rw->l_nodelete_active)
> {
> if (_dlfo_nodelete_mappings != NULL)
> /* Second pass only. */
> diff --git a/elf/dl-fini.c b/elf/dl-fini.c
> index 3add4f77c1..3f3848ee89 100644
> --- a/elf/dl-fini.c
> +++ b/elf/dl-fini.c
> @@ -78,12 +78,12 @@ _dl_fini (void)
> assert (i < nloaded);
>
> maps[i] = l;
> - l->l_idx = i;
> + l->l_rw->l_idx = i;
> ++i;
>
> /* Bump l_direct_opencount of all objects so that they
> are not dlclose()ed from underneath us. */
> - ++l->l_direct_opencount;
> + ++l->l_rw->l_direct_opencount;
> }
> else
> /* Used below to call la_objclose for the ld.so proxy
> @@ -115,7 +115,7 @@ _dl_fini (void)
> {
> struct link_map *l = maps[i];
>
> - if (l->l_init_called)
> + if (l->l_rw->l_init_called)
> {
> _dl_call_fini (l);
> #ifdef SHARED
> @@ -125,7 +125,7 @@ _dl_fini (void)
> }
>
> /* Correct the previous increment. */
> - --l->l_direct_opencount;
> + --l->l_rw->l_direct_opencount;
> }
>
> if (proxy_link_map != NULL)
> diff --git a/elf/dl-init.c b/elf/dl-init.c
> index 2271208e68..ad82a47d75 100644
> --- a/elf/dl-init.c
> +++ b/elf/dl-init.c
> @@ -34,13 +34,13 @@ call_init (struct link_map *l, int argc, char **argv, char **env)
> need relocation.) */
> assert (l->l_relocated || l->l_type == lt_executable);
>
> - if (l->l_init_called)
> + if (l->l_rw->l_init_called)
> /* This object is all done. */
> return;
>
> /* Avoid handling this constructor again in case we have a circular
> dependency. */
> - l->l_init_called = 1;
> + l->l_rw->l_init_called = 1;
>
> /* Check for object which constructors we do not run here. */
> if (__builtin_expect (l->l_name[0], 'a') == '\0'
> diff --git a/elf/dl-lookup.c b/elf/dl-lookup.c
> index ece647f009..415c4f3c78 100644
> --- a/elf/dl-lookup.c
> +++ b/elf/dl-lookup.c
> @@ -175,9 +175,9 @@ static void
> mark_nodelete (struct link_map *map, int flags)
> {
> if (flags & DL_LOOKUP_FOR_RELOCATE)
> - map->l_nodelete_pending = true;
> + map->l_rw->l_nodelete_pending = true;
> else
> - map->l_nodelete_active = true;
> + map->l_rw->l_nodelete_active = true;
> }
>
> /* Return true if MAP is marked as NODELETE according to the lookup
> @@ -187,8 +187,8 @@ is_nodelete (struct link_map *map, int flags)
> {
> /* Non-pending NODELETE always counts. Pending NODELETE only counts
> during initial relocation processing. */
> - return map->l_nodelete_active
> - || ((flags & DL_LOOKUP_FOR_RELOCATE) && map->l_nodelete_pending);
> + return map->l_rw->l_nodelete_active
> + || ((flags & DL_LOOKUP_FOR_RELOCATE) && map->l_rw->l_nodelete_pending);
> }
>
> /* Utility function for do_lookup_x. Lookup an STB_GNU_UNIQUE symbol
> @@ -532,7 +532,7 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
> return 0;
>
> struct link_map_reldeps *l_reldeps
> - = atomic_forced_read (undef_map->l_reldeps);
> + = atomic_forced_read (undef_map->l_rw->l_reldeps);
>
> /* Make sure l_reldeps is read before l_initfini. */
> atomic_read_barrier ();
> @@ -591,22 +591,22 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
>
> /* Redo the l_reldeps check if undef_map's l_reldeps changed in
> the mean time. */
> - if (undef_map->l_reldeps != NULL)
> + if (undef_map->l_rw->l_reldeps != NULL)
> {
> - if (undef_map->l_reldeps != l_reldeps)
> + if (undef_map->l_rw->l_reldeps != l_reldeps)
> {
> - struct link_map **list = &undef_map->l_reldeps->list[0];
> - l_reldepsact = undef_map->l_reldeps->act;
> + struct link_map **list = &undef_map->l_rw->l_reldeps->list[0];
> + l_reldepsact = undef_map->l_rw->l_reldeps->act;
> for (i = 0; i < l_reldepsact; ++i)
> if (list[i] == map)
> goto out_check;
> }
> - else if (undef_map->l_reldeps->act > l_reldepsact)
> + else if (undef_map->l_rw->l_reldeps->act > l_reldepsact)
> {
> struct link_map **list
> - = &undef_map->l_reldeps->list[0];
> + = &undef_map->l_rw->l_reldeps->list[0];
> i = l_reldepsact;
> - l_reldepsact = undef_map->l_reldeps->act;
> + l_reldepsact = undef_map->l_rw->l_reldeps->act;
> for (; i < l_reldepsact; ++i)
> if (list[i] == map)
> goto out_check;
> @@ -662,14 +662,14 @@ marking %s [%lu] as NODELETE due to reference from %s [%lu]\n",
> }
>
> /* Add the reference now. */
> - if (__glibc_unlikely (l_reldepsact >= undef_map->l_reldepsmax))
> + if (__glibc_unlikely (l_reldepsact >= undef_map->l_rw->l_reldepsmax))
> {
> /* Allocate more memory for the dependency list. Since this
> can never happen during the startup phase we can use
> `realloc'. */
> struct link_map_reldeps *newp;
> - unsigned int max
> - = undef_map->l_reldepsmax ? undef_map->l_reldepsmax * 2 : 10;
> + unsigned int max = (undef_map->l_rw->l_reldepsmax
> + ? undef_map->l_rw->l_reldepsmax * 2 : 10);
>
> #ifdef RTLD_PREPARE_FOREIGN_CALL
> RTLD_PREPARE_FOREIGN_CALL;
> @@ -696,23 +696,23 @@ marking %s [%lu] as NODELETE due to memory allocation failure\n",
> else
> {
> if (l_reldepsact)
> - memcpy (&newp->list[0], &undef_map->l_reldeps->list[0],
> + memcpy (&newp->list[0], &undef_map->l_rw->l_reldeps->list[0],
> l_reldepsact * sizeof (struct link_map *));
> newp->list[l_reldepsact] = map;
> newp->act = l_reldepsact + 1;
> atomic_write_barrier ();
> - void *old = undef_map->l_reldeps;
> - undef_map->l_reldeps = newp;
> - undef_map->l_reldepsmax = max;
> + void *old = undef_map->l_rw->l_reldeps;
> + undef_map->l_rw->l_reldeps = newp;
> + undef_map->l_rw->l_reldepsmax = max;
> if (old)
> _dl_scope_free (old);
> }
> }
> else
> {
> - undef_map->l_reldeps->list[l_reldepsact] = map;
> + undef_map->l_rw->l_reldeps->list[l_reldepsact] = map;
> atomic_write_barrier ();
> - undef_map->l_reldeps->act = l_reldepsact + 1;
> + undef_map->l_rw->l_reldeps->act = l_reldepsact + 1;
> }
>
> /* Display information if we are debugging. */
> diff --git a/elf/dl-object.c b/elf/dl-object.c
> index 51d3704edc..db9c635c7e 100644
> --- a/elf/dl-object.c
> +++ b/elf/dl-object.c
> @@ -89,15 +89,20 @@ _dl_new_object (char *realname, const char *libname, int type,
> # define audit_space 0
> #endif
>
> - new = (struct link_map *) calloc (sizeof (*new) + audit_space
> - + sizeof (struct link_map *)
> - + sizeof (*newname) + libname_len, 1);
> + new = calloc (sizeof (*new)
> + + sizeof (struct link_map_private *)
> + + sizeof (*newname) + libname_len, 1);
> if (new == NULL)
> return NULL;
> + new->l_rw = calloc (1, sizeof (*new->l_rw) + audit_space);
> + if (new->l_rw == NULL)
> + {
> + free (new);
> + return NULL;
> + }
>
> new->l_real = new;
> - new->l_symbolic_searchlist.r_list = (struct link_map **) ((char *) (new + 1)
> - + audit_space);
> + new->l_symbolic_searchlist.r_list = (struct link_map **) ((char *) (new + 1));
>
> new->l_libname = newname
> = (struct libname_list *) (new->l_symbolic_searchlist.r_list + 1);
> @@ -131,7 +136,7 @@ _dl_new_object (char *realname, const char *libname, int type,
> new->l_used = 1;
> new->l_loader = loader;
> #if NO_TLS_OFFSET != 0
> - new->l_tls_offset = NO_TLS_OFFSET;
> + new->l_rw->l_tls_offset = NO_TLS_OFFSET;
> #endif
> new->l_ns = nsid;
>
> diff --git a/elf/dl-open.c b/elf/dl-open.c
> index 4fb77e3ff7..85d6bbc7c2 100644
> --- a/elf/dl-open.c
> +++ b/elf/dl-open.c
> @@ -261,7 +261,7 @@ resize_scopes (struct link_map *new)
>
> /* If the initializer has been called already, the object has
> not been loaded here and now. */
> - if (imap->l_init_called && imap->l_type == lt_loaded)
> + if (imap->l_rw->l_init_called && imap->l_type == lt_loaded)
> {
> if (scope_has_map (imap, new))
> /* Avoid duplicates. */
> @@ -325,7 +325,7 @@ update_scopes (struct link_map *new)
> struct link_map *imap = new->l_searchlist.r_list[i];
> int from_scope = 0;
>
> - if (imap->l_init_called && imap->l_type == lt_loaded)
> + if (imap->l_rw->l_init_called && imap->l_type == lt_loaded)
> {
> if (scope_has_map (imap, new))
> /* Avoid duplicates. */
> @@ -424,7 +424,7 @@ activate_nodelete (struct link_map *new)
> NODELETE status for objects outside the local scope. */
> for (struct link_map *l = GL (dl_ns)[new->l_ns]._ns_loaded; l != NULL;
> l = l->l_next)
> - if (l->l_nodelete_pending)
> + if (l->l_rw->l_nodelete_pending)
> {
> if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES))
> _dl_debug_printf ("activating NODELETE for %s [%lu]\n",
> @@ -433,11 +433,11 @@ activate_nodelete (struct link_map *new)
> /* The flag can already be true at this point, e.g. a signal
> handler may have triggered lazy binding and set NODELETE
> status immediately. */
> - l->l_nodelete_active = true;
> + l->l_rw->l_nodelete_active = true;
>
> /* This is just a debugging aid, to indicate that
> activate_nodelete has run for this map. */
> - l->l_nodelete_pending = false;
> + l->l_rw->l_nodelete_pending = false;
> }
> }
>
> @@ -476,7 +476,7 @@ _dl_open_relocate_one_object (struct dl_open_args *args, struct r_debug *r,
> _dl_start_profile ();
>
> /* Prevent unloading the object. */
> - GL(dl_profile_map)->l_nodelete_active = true;
> + GL(dl_profile_map)->l_rw->l_nodelete_active = true;
> }
> }
> else
> @@ -505,7 +505,7 @@ is_already_fully_open (struct link_map *map, int mode)
> /* The object is already in the global scope if requested. */
> && (!(mode & RTLD_GLOBAL) || map->l_global)
> /* The object is already NODELETE if requested. */
> - && (!(mode & RTLD_NODELETE) || map->l_nodelete_active));
> + && (!(mode & RTLD_NODELETE) || map->l_rw->l_nodelete_active));
> }
>
> static void
> @@ -547,7 +547,7 @@ dl_open_worker_begin (void *a)
> return;
>
> /* This object is directly loaded. */
> - ++new->l_direct_opencount;
> + ++new->l_rw->l_direct_opencount;
>
> /* It was already open. See is_already_fully_open above. */
> if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
> @@ -555,7 +555,8 @@ dl_open_worker_begin (void *a)
> /* Let the user know about the opencount. */
> if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
> _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
> - new->l_name, new->l_ns, new->l_direct_opencount);
> + new->l_name, new->l_ns,
> + new->l_rw->l_direct_opencount);
>
> #ifdef SHARED
> /* No relocation processing on this execution path. But
> @@ -576,10 +577,10 @@ dl_open_worker_begin (void *a)
> if (__glibc_unlikely (mode & RTLD_NODELETE))
> {
> if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES)
> - && !new->l_nodelete_active)
> + && !new->l_rw->l_nodelete_active)
> _dl_debug_printf ("marking %s [%lu] as NODELETE\n",
> new->l_name, new->l_ns);
> - new->l_nodelete_active = true;
> + new->l_rw->l_nodelete_active = true;
> }
>
> /* Finalize the addition to the global scope. */
> @@ -592,7 +593,7 @@ dl_open_worker_begin (void *a)
> /* Schedule NODELETE marking for the directly loaded object if
> requested. */
> if (__glibc_unlikely (mode & RTLD_NODELETE))
> - new->l_nodelete_pending = true;
> + new->l_rw->l_nodelete_pending = true;
>
> /* Load that object's dependencies. */
> _dl_map_object_deps (new, NULL, 0, 0,
> @@ -795,7 +796,7 @@ dl_open_worker (void *a)
> /* Let the user know about the opencount. */
> if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
> _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
> - new->l_name, new->l_ns, new->l_direct_opencount);
> + new->l_name, new->l_ns, new->l_rw->l_direct_opencount);
> }
>
> void *
> @@ -881,7 +882,7 @@ no more namespaces available for dlmopen()"));
> if (is_already_fully_open (args.map, mode))
> {
> /* We can use the fast path. */
> - ++args.map->l_direct_opencount;
> + ++args.map->l_rw->l_direct_opencount;
> __rtld_lock_unlock_recursive (GL(dl_load_lock));
> return args.map;
> }
> diff --git a/elf/dl-reloc.c b/elf/dl-reloc.c
> index 05bf54bebd..603390498b 100644
> --- a/elf/dl-reloc.c
> +++ b/elf/dl-reloc.c
> @@ -41,7 +41,7 @@
> dynamically loaded. This can only work if there is enough surplus in
> the static TLS area already allocated for each running thread. If this
> object's TLS segment is too big to fit, we fail with -1. If it fits,
> - we set MAP->l_tls_offset and return 0.
> + we set MAP->l_rw->l_tls_offset and return 0.
> A portion of the surplus static TLS can be optionally used to optimize
> dynamic TLS access (with TLSDESC or powerpc TLS optimizations).
> If OPTIONAL is true then TLS is allocated for such optimization and
> @@ -53,7 +53,7 @@ _dl_try_allocate_static_tls (struct link_map *map, bool optional)
> {
> /* If we've already used the variable with dynamic access, or if the
> alignment requirements are too high, fail. */
> - if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
> + if (map->l_rw->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
> || map->l_tls_align > GLRO (dl_tls_static_align))
> {
> fail:
> @@ -81,7 +81,7 @@ _dl_try_allocate_static_tls (struct link_map *map, bool optional)
>
> size_t offset = GL(dl_tls_static_used) + use;
>
> - map->l_tls_offset = GL(dl_tls_static_used) = offset;
> + map->l_rw->l_tls_offset = GL(dl_tls_static_used) = offset;
> #elif TLS_DTV_AT_TP
> /* dl_tls_static_used includes the TCB at the beginning. */
> size_t offset = (ALIGN_UP(GL(dl_tls_static_used)
> @@ -100,7 +100,7 @@ _dl_try_allocate_static_tls (struct link_map *map, bool optional)
> else if (optional)
> GL(dl_tls_static_optional) -= use;
>
> - map->l_tls_offset = offset;
> + map->l_rw->l_tls_offset = offset;
> map->l_tls_firstbyte_offset = GL(dl_tls_static_used);
> GL(dl_tls_static_used) = used;
> #else
> @@ -134,7 +134,7 @@ void
> __attribute_noinline__
> _dl_allocate_static_tls (struct link_map *map)
> {
> - if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
> + if (map->l_rw->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
> || _dl_try_allocate_static_tls (map, false))
> {
> _dl_signal_error (0, map->l_name, NULL, N_("\
> @@ -150,9 +150,10 @@ void
> _dl_nothread_init_static_tls (struct link_map *map)
> {
> #if TLS_TCB_AT_TP
> - void *dest = (char *) THREAD_SELF - map->l_tls_offset;
> + void *dest = (char *) THREAD_SELF - map->l_rw->l_tls_offset;
> #elif TLS_DTV_AT_TP
> - void *dest = (char *) THREAD_SELF + map->l_tls_offset + TLS_PRE_TCB_SIZE;
> + void *dest = ((char *) THREAD_SELF + map->l_rw->l_tls_offset
> + + TLS_PRE_TCB_SIZE);
> #else
> # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
> #endif
> diff --git a/elf/dl-sort-maps.c b/elf/dl-sort-maps.c
> index e5740dfc1d..14c323c83e 100644
> --- a/elf/dl-sort-maps.c
> +++ b/elf/dl-sort-maps.c
> @@ -51,7 +51,7 @@ _dl_sort_maps_original (struct link_map **maps, unsigned int nmaps,
> {
> /* Do not handle ld.so in secondary namespaces and objects which
> are not removed. */
> - if (thisp != thisp->l_real || thisp->l_idx == -1)
> + if (thisp != thisp->l_real || thisp->l_rw->l_idx == -1)
> goto skip;
> }
>
> @@ -87,10 +87,10 @@ _dl_sort_maps_original (struct link_map **maps, unsigned int nmaps,
> goto next;
> }
>
> - if (__glibc_unlikely (for_fini && maps[k]->l_reldeps != NULL))
> + if (__glibc_unlikely (for_fini && maps[k]->l_rw->l_reldeps != NULL))
> {
> - unsigned int m = maps[k]->l_reldeps->act;
> - struct link_map **relmaps = &maps[k]->l_reldeps->list[0];
> + unsigned int m = maps[k]->l_rw->l_reldeps->act;
> + struct link_map **relmaps = &maps[k]->l_rw->l_reldeps->list[0];
>
> /* Look through the relocation dependencies of the object. */
> while (m-- > 0)
> @@ -137,32 +137,32 @@ dfs_traversal (struct link_map ***rpo, struct link_map *map,
> {
> /* _dl_map_object_deps ignores l_faked objects when calculating the
> number of maps before calling _dl_sort_maps, ignore them as well. */
> - if (map->l_visited || map->l_faked)
> + if (map->l_rw->l_visited || map->l_faked)
> return;
>
> - map->l_visited = 1;
> + map->l_rw->l_visited = 1;
>
> if (map->l_initfini)
> {
> for (int i = 0; map->l_initfini[i] != NULL; i++)
> {
> struct link_map *dep = map->l_initfini[i];
> - if (dep->l_visited == 0
> + if (dep->l_rw->l_visited == 0
> && dep->l_main_map == 0)
> dfs_traversal (rpo, dep, do_reldeps);
> }
> }
>
> - if (__glibc_unlikely (do_reldeps != NULL && map->l_reldeps != NULL))
> + if (__glibc_unlikely (do_reldeps != NULL && map->l_rw->l_reldeps != NULL))
> {
> /* Indicate that we encountered relocation dependencies during
> traversal. */
> *do_reldeps = true;
>
> - for (int m = map->l_reldeps->act - 1; m >= 0; m--)
> + for (int m = map->l_rw->l_reldeps->act - 1; m >= 0; m--)
> {
> - struct link_map *dep = map->l_reldeps->list[m];
> - if (dep->l_visited == 0
> + struct link_map *dep = map->l_rw->l_reldeps->list[m];
> + if (dep->l_rw->l_visited == 0
> && dep->l_main_map == 0)
> dfs_traversal (rpo, dep, do_reldeps);
> }
> @@ -181,7 +181,7 @@ _dl_sort_maps_dfs (struct link_map **maps, unsigned int nmaps,
> {
> struct link_map *first_map = maps[0];
> for (int i = nmaps - 1; i >= 0; i--)
> - maps[i]->l_visited = 0;
> + maps[i]->l_rw->l_visited = 0;
>
> /* We apply DFS traversal for each of maps[i] until the whole total order
> is found and we're at the start of the Reverse-Postorder (RPO) sequence,
> @@ -244,7 +244,7 @@ _dl_sort_maps_dfs (struct link_map **maps, unsigned int nmaps,
> if (do_reldeps)
> {
> for (int i = nmaps - 1; i >= 0; i--)
> - rpo[i]->l_visited = 0;
> + rpo[i]->l_rw->l_visited = 0;
>
> struct link_map **maps_head = &maps[nmaps];
> for (int i = nmaps - 1; i >= 0; i--)
> diff --git a/elf/dl-static-tls.h b/elf/dl-static-tls.h
> index 3bc29007a3..473d194ed6 100644
> --- a/elf/dl-static-tls.h
> +++ b/elf/dl-static-tls.h
> @@ -29,8 +29,8 @@
> can't be done, we fall back to the error that DF_STATIC_TLS is
> intended to produce. */
> #define HAVE_STATIC_TLS(map, sym_map) \
> - (__builtin_expect ((sym_map)->l_tls_offset != NO_TLS_OFFSET \
> - && ((sym_map)->l_tls_offset \
> + (__builtin_expect ((sym_map)->l_rw->l_tls_offset != NO_TLS_OFFSET \
> + && ((sym_map)->l_rw->l_tls_offset \
> != FORCED_DYNAMIC_TLS_OFFSET), 1))
>
> #define CHECK_STATIC_TLS(map, sym_map) \
> @@ -40,9 +40,9 @@
> } while (0)
>
> #define TRY_STATIC_TLS(map, sym_map) \
> - (__builtin_expect ((sym_map)->l_tls_offset \
> + (__builtin_expect ((sym_map)->l_rw->l_tls_offset \
> != FORCED_DYNAMIC_TLS_OFFSET, 1) \
> - && (__builtin_expect ((sym_map)->l_tls_offset != NO_TLS_OFFSET, 1) \
> + && (__builtin_expect ((sym_map)->l_rw->l_tls_offset != NO_TLS_OFFSET, 1)\
> || _dl_try_allocate_static_tls (sym_map, true) == 0))
>
> int _dl_try_allocate_static_tls (struct link_map *map, bool optional)
> diff --git a/elf/dl-support.c b/elf/dl-support.c
> index a7d5a5e8ab..aa2be3e934 100644
> --- a/elf/dl-support.c
> +++ b/elf/dl-support.c
> @@ -82,6 +82,7 @@ int _dl_bind_not;
> static struct link_map _dl_main_map =
> {
> .l_name = (char *) "",
> + .l_rw = &(struct link_map_rw) { .l_tls_offset = NO_TLS_OFFSET, },
> .l_real = &_dl_main_map,
> .l_ns = LM_ID_BASE,
> .l_libname = &(struct libname_list) { .name = "", .dont_free = 1 },
> @@ -98,7 +99,6 @@ static struct link_map _dl_main_map =
> .l_scope = _dl_main_map.l_scope_mem,
> .l_local_scope = { &_dl_main_map.l_searchlist },
> .l_used = 1,
> - .l_tls_offset = NO_TLS_OFFSET,
> .l_serial = 1,
> };
>
> diff --git a/elf/dl-tls.c b/elf/dl-tls.c
> index 8306a39e8d..a4a826e6a4 100644
> --- a/elf/dl-tls.c
> +++ b/elf/dl-tls.c
> @@ -299,7 +299,7 @@ _dl_determine_tlsoffset (void)
>
> /* XXX For some architectures we perhaps should store the
> negative offset. */
> - l->l_tls_offset = off;
> + l->l_rw->l_tls_offset = off;
> continue;
> }
> }
> @@ -316,7 +316,7 @@ _dl_determine_tlsoffset (void)
>
> /* XXX For some architectures we perhaps should store the
> negative offset. */
> - l->l_tls_offset = off;
> + l->l_rw->l_tls_offset = off;
> }
>
> /* Insert the extra TLS block after the last TLS block. */
> @@ -378,9 +378,9 @@ _dl_determine_tlsoffset (void)
> off += l->l_tls_align;
> if (off + l->l_tls_blocksize - firstbyte <= freetop)
> {
> - l->l_tls_offset = off - firstbyte;
> + l->l_rw->l_tls_offset = off - firstbyte;
> freebottom = (off + l->l_tls_blocksize
> - - firstbyte);
> + - firstbyte);
> continue;
> }
> }
> @@ -389,7 +389,7 @@ _dl_determine_tlsoffset (void)
> if (off - offset < firstbyte)
> off += l->l_tls_align;
>
> - l->l_tls_offset = off - firstbyte;
> + l->l_rw->l_tls_offset = off - firstbyte;
> if (off - firstbyte - offset > freetop - freebottom)
> {
> freebottom = offset;
> @@ -645,17 +645,17 @@ _dl_allocate_tls_init (void *result, bool main_thread)
> dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
> dtv[map->l_tls_modid].pointer.to_free = NULL;
>
> - if (map->l_tls_offset == NO_TLS_OFFSET
> - || map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET)
> + if (map->l_rw->l_tls_offset == NO_TLS_OFFSET
> + || map->l_rw->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET)
> continue;
>
> assert (map->l_tls_modid == total + cnt);
> assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
> #if TLS_TCB_AT_TP
> - assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
> - dest = (char *) result - map->l_tls_offset;
> + assert ((size_t) map->l_rw->l_tls_offset >= map->l_tls_blocksize);
> + dest = (char *) result - map->l_rw->l_tls_offset;
> #elif TLS_DTV_AT_TP
> - dest = (char *) result + map->l_tls_offset;
> + dest = (char *) result + map->l_rw->l_tls_offset;
> #else
> # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
> #endif
> @@ -959,22 +959,23 @@ tls_get_addr_tail (tls_index *ti, dtv_t *dtv, struct link_map *the_map)
> variable into static storage, we'll wait until the address in the
> static TLS block is set up, and use that. If we're undecided
> yet, make sure we make the decision holding the lock as well. */
> - if (__glibc_unlikely (the_map->l_tls_offset
> + if (__glibc_unlikely (the_map->l_rw->l_tls_offset
> != FORCED_DYNAMIC_TLS_OFFSET))
> {
> __rtld_lock_lock_recursive (GL(dl_load_tls_lock));
> - if (__glibc_likely (the_map->l_tls_offset == NO_TLS_OFFSET))
> + if (__glibc_likely (the_map->l_rw->l_tls_offset == NO_TLS_OFFSET))
> {
> - the_map->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
> + the_map->l_rw->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
> __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
> }
> - else if (__glibc_likely (the_map->l_tls_offset
> + else if (__glibc_likely (the_map->l_rw->l_tls_offset
> != FORCED_DYNAMIC_TLS_OFFSET))
> {
> #if TLS_TCB_AT_TP
> - void *p = (char *) THREAD_SELF - the_map->l_tls_offset;
> + void *p = (char *) THREAD_SELF - the_map->l_rw->l_tls_offset;
> #elif TLS_DTV_AT_TP
> - void *p = (char *) THREAD_SELF + the_map->l_tls_offset + TLS_PRE_TCB_SIZE;
> + void *p = ((char *) THREAD_SELF + the_map->l_rw->l_tls_offset
> + + TLS_PRE_TCB_SIZE);
> #else
> # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
> #endif
> @@ -1223,9 +1224,9 @@ static inline void __attribute__((always_inline))
> init_one_static_tls (struct pthread *curp, struct link_map *map)
> {
> # if TLS_TCB_AT_TP
> - void *dest = (char *) curp - map->l_tls_offset;
> + void *dest = (char *) curp - map->l_rw->l_tls_offset;
> # elif TLS_DTV_AT_TP
> - void *dest = (char *) curp + map->l_tls_offset + TLS_PRE_TCB_SIZE;
> + void *dest = (char *) curp + map->l_rw->l_tls_offset + TLS_PRE_TCB_SIZE;
> # else
> # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
> # endif
> diff --git a/elf/get-dynamic-info.h b/elf/get-dynamic-info.h
> index d3d830e86c..3154bd69b0 100644
> --- a/elf/get-dynamic-info.h
> +++ b/elf/get-dynamic-info.h
> @@ -163,7 +163,7 @@ elf_get_dynamic_info (struct link_map *l, bool bootstrap,
> {
> l->l_flags_1 = info[VERSYMIDX (DT_FLAGS_1)]->d_un.d_val;
> if (l->l_flags_1 & DF_1_NODELETE)
> - l->l_nodelete_pending = true;
> + l->l_rw->l_nodelete_pending = true;
>
> /* Only DT_1_SUPPORTED_MASK bits are supported, and we would like
> to assert this, but we can't. Users have been setting
> diff --git a/elf/loadtest.c b/elf/loadtest.c
> index b5eab5e93c..2da6279f2f 100644
> --- a/elf/loadtest.c
> +++ b/elf/loadtest.c
> @@ -78,7 +78,7 @@ static const struct
> for (map = MAPS; map != NULL; map = map->l_next) \
> if (map->l_type == lt_loaded) \
> printf ("name = \"%s\", direct_opencount = %d\n", \
> - map->l_name, (int) map->l_direct_opencount); \
> + map->l_name, (int) map->l_rw->l_direct_opencount); \
> fflush (stdout); \
> } \
> while (0)
> @@ -190,7 +190,7 @@ main (int argc, char *argv[])
> if (map->l_type == lt_loaded)
> {
> printf ("name = \"%s\", direct_opencount = %d\n",
> - map->l_name, (int) map->l_direct_opencount);
> + map->l_name, (int) map->l_rw->l_direct_opencount);
> result = 1;
> }
>
> diff --git a/elf/neededtest.c b/elf/neededtest.c
> index 3cea499314..eccf4cbb10 100644
> --- a/elf/neededtest.c
> +++ b/elf/neededtest.c
> @@ -29,7 +29,8 @@ check_loaded_objects (const char **loaded)
> for (lm = MAPS; lm; lm = lm->l_next)
> {
> if (lm->l_name && lm->l_name[0])
> - printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
> + printf(" %s, count = %d\n", lm->l_name,
> + (int) lm->l_rw->l_direct_opencount);
> if (lm->l_type == lt_loaded && lm->l_name)
> {
> int match = 0;
> diff --git a/elf/neededtest2.c b/elf/neededtest2.c
> index 17c75f2ba3..aa695cd4bb 100644
> --- a/elf/neededtest2.c
> +++ b/elf/neededtest2.c
> @@ -29,7 +29,8 @@ check_loaded_objects (const char **loaded)
> for (lm = MAPS; lm; lm = lm->l_next)
> {
> if (lm->l_name && lm->l_name[0])
> - printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
> + printf(" %s, count = %d\n", lm->l_name,
> + (int) lm->l_rw->l_direct_opencount);
> if (lm->l_type == lt_loaded && lm->l_name)
> {
> int match = 0;
> diff --git a/elf/neededtest3.c b/elf/neededtest3.c
> index 41970cf2c7..0b9ee75be3 100644
> --- a/elf/neededtest3.c
> +++ b/elf/neededtest3.c
> @@ -29,7 +29,8 @@ check_loaded_objects (const char **loaded)
> for (lm = MAPS; lm; lm = lm->l_next)
> {
> if (lm->l_name && lm->l_name[0])
> - printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
> + printf(" %s, count = %d\n", lm->l_name,
> + (int) lm->l_rw->l_direct_opencount);
> if (lm->l_type == lt_loaded && lm->l_name)
> {
> int match = 0;
> diff --git a/elf/neededtest4.c b/elf/neededtest4.c
> index 0ae0b7ff47..cb4f574265 100644
> --- a/elf/neededtest4.c
> +++ b/elf/neededtest4.c
> @@ -29,7 +29,8 @@ check_loaded_objects (const char **loaded)
> for (lm = MAPS; lm; lm = lm->l_next)
> {
> if (lm->l_name && lm->l_name[0])
> - printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
> + printf(" %s, count = %d\n", lm->l_name,
> + (int) lm->l_rw->l_direct_opencount);
> if (lm->l_type == lt_loaded && lm->l_name)
> {
> int match = 0;
> diff --git a/elf/rtld.c b/elf/rtld.c
> index 115f1da37f..1bb369ef2b 100644
> --- a/elf/rtld.c
> +++ b/elf/rtld.c
> @@ -460,6 +460,17 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
> interfere with __rtld_static_init. */
> GLRO (dl_find_object) = &_dl_find_object;
>
> + /* Pre-allocated read-write status of the ld.so link map. */
> + static struct
> + {
> + struct link_map_rw l;
> + struct auditstate _dl_rtld_auditstate[DL_NNS];
> + } rtld_map_rw;
> + _dl_rtld_map.l_rw = &rtld_map_rw.l;
> +#if NO_TLS_OFFSET != 0
> + _dl_rtld_map.l_rw->l_tls_offset = NO_TLS_OFFSET;
> +#endif
> +
> /* If it hasn't happen yet record the startup time. */
> rtld_timer_start (&start_time);
> #if !defined DONT_USE_BOOTSTRAP_MAP
> @@ -482,7 +493,7 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
> /* Copy the TLS related data if necessary. */
> #ifndef DONT_USE_BOOTSTRAP_MAP
> # if NO_TLS_OFFSET != 0
> - _dl_rtld_map.l_tls_offset = NO_TLS_OFFSET;
> + _dl_rtld_map.l_rw->l_tls_offset = NO_TLS_OFFSET;
> # endif
> #endif
>
> @@ -549,10 +560,6 @@ _dl_start (void *arg)
> bootstrap_map.l_ld_readonly = DL_RO_DYN_SECTION;
> elf_get_dynamic_info (&bootstrap_map, true, false);
>
> -#if NO_TLS_OFFSET != 0
> - bootstrap_map.l_tls_offset = NO_TLS_OFFSET;
> -#endif
> -
> #ifdef ELF_MACHINE_BEFORE_RTLD_RELOC
> ELF_MACHINE_BEFORE_RTLD_RELOC (&bootstrap_map, bootstrap_map.l_info);
> #endif
> @@ -1100,7 +1107,7 @@ rtld_setup_main_map (struct link_map *main_map)
> /* Perhaps the executable has no PT_LOAD header entries at all. */
> main_map->l_map_start = ~0;
> /* And it was opened directly. */
> - ++main_map->l_direct_opencount;
> + ++main_map->l_rw->l_direct_opencount;
> main_map->l_contiguous = 1;
>
> /* A PT_LOAD segment at an unexpected address will clear the
> diff --git a/elf/tst-tls_tp_offset.c b/elf/tst-tls_tp_offset.c
> index a8faebc0eb..ff9a89a125 100644
> --- a/elf/tst-tls_tp_offset.c
> +++ b/elf/tst-tls_tp_offset.c
> @@ -34,7 +34,8 @@ do_test (void)
> printf ("thread variable address: %p\n", &thread_var);
> printf ("thread pointer address: %p\n", __thread_pointer ());
> printf ("pthread_self address: %p\n", (void *) pthread_self ());
> - ptrdiff_t block_offset = ((struct link_map *) _r_debug.r_map)->l_tls_offset;
> + ptrdiff_t block_offset
> + = ((struct link_map *) _r_debug.r_map)->l_rw->l_tls_offset;
> printf ("main program TLS block offset: %td\n", block_offset);
>
> if ((uintptr_t) &thread_var < (uintptr_t) THREAD_SELF)
> diff --git a/elf/unload.c b/elf/unload.c
> index 4566f226f8..39d7b1adac 100644
> --- a/elf/unload.c
> +++ b/elf/unload.c
> @@ -15,7 +15,7 @@
> for (map = MAPS; map != NULL; map = map->l_next) \
> if (map->l_type == lt_loaded) \
> printf ("name = \"%s\", direct_opencount = %d\n", \
> - map->l_name, (int) map->l_direct_opencount); \
> + map->l_name, (int) map->l_rw->l_direct_opencount); \
> fflush (stdout)
>
> typedef struct
> diff --git a/elf/unload2.c b/elf/unload2.c
> index eef2bfd426..88fdd0a57c 100644
> --- a/elf/unload2.c
> +++ b/elf/unload2.c
> @@ -12,7 +12,7 @@
> for (map = MAPS; map != NULL; map = map->l_next) \
> if (map->l_type == lt_loaded) \
> printf ("name = \"%s\", direct_opencount = %d\n", \
> - map->l_name, (int) map->l_direct_opencount); \
> + map->l_name, (int) map->l_rw->l_direct_opencount); \
> fflush (stdout)
>
> int
> diff --git a/htl/pt-alloc.c b/htl/pt-alloc.c
> index c0074b4447..06e36c766e 100644
> --- a/htl/pt-alloc.c
> +++ b/htl/pt-alloc.c
> @@ -217,9 +217,10 @@ __pthread_init_static_tls (struct link_map *map)
> continue;
>
> # if TLS_TCB_AT_TP
> - void *dest = (char *) t->tcb - map->l_tls_offset;
> + void *dest = (char *) t->tcb - map->l_rw->l_tls_offset;
> # elif TLS_DTV_AT_TP
> - void *dest = (char *) t->tcb + map->l_tls_offset + TLS_PRE_TCB_SIZE;
> + void *dest = ((char *) t->tcb + map->l_rw->l_tls_offset
> + + TLS_PRE_TCB_SIZE);
> # else
> # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
> # endif
> diff --git a/include/link.h b/include/link.h
> index 518bfd1670..2fddf315d4 100644
> --- a/include/link.h
> +++ b/include/link.h
> @@ -83,6 +83,71 @@ struct r_search_path_struct
> extern struct r_search_path_struct __rtld_search_dirs attribute_hidden;
> extern struct r_search_path_struct __rtld_env_path_list attribute_hidden;
>
> +
> +/* Link map attributes that are always readable and writable. */
> +struct link_map_rw
> +{
> + /* List of the dependencies introduced through symbol binding. */
> + struct link_map_reldeps
> + {
> + unsigned int act;
> + struct link_map *list[];
> + } *l_reldeps;
> + unsigned int l_reldepsmax;
> +
> + /* Reference count for dlopen/dlclose. */
> + unsigned int l_direct_opencount;
> +
> + /* For objects present at startup time: offset in the static TLS
> + block. For loaded objects, it can be NO_TLS_OFFSET (not yet
> + initialized), FORCED_DYNAMIC_TLS_OFFSET (if fully dynamic TLS is
> + used), or an actual TLS offset (if the static TLS allocation has
> + been re-used to satisfy dynamic TLS needs).
> +
> + This field is written outside the general loader lock, so it has
> + to reside in the read-write portion of the link map. */
> +#ifndef NO_TLS_OFFSET
> +# define NO_TLS_OFFSET 0
> +#endif
> +#ifndef FORCED_DYNAMIC_TLS_OFFSET
> +# if NO_TLS_OFFSET == 0
> +# define FORCED_DYNAMIC_TLS_OFFSET -1
> +# elif NO_TLS_OFFSET == -1
> +# define FORCED_DYNAMIC_TLS_OFFSET -2
> +# else
> +# error "FORCED_DYNAMIC_TLS_OFFSET is not defined"
> +# endif
> +#endif
> + ptrdiff_t l_tls_offset;
> +
> + /* Number of thread_local objects constructed by this DSO. This is
> + atomically accessed and modified and is not always protected by the load
> + lock. See also: CONCURRENCY NOTES in cxa_thread_atexit_impl.c. */
> + size_t l_tls_dtor_count;
> +
> + /* True if ELF constructors have been called. */
> + bool l_init_called;
> +
This patch misses to update sysdeps/x86_64/dl-cet.c, otherwise the x86_64
with --enable-cet fails with:
../sysdeps/x86_64/dl-cet.c: In function ‘dl_check_legacy_object’:
../sysdeps/x86_64/dl-cet.c:73:12: error: ‘struct link_map’ has no member named ‘l_init_called’
73 | if (l->l_init_called)
| ^~
> + /* NODELETE status of the map. Only valid for maps of type
> + lt_loaded. Lazy binding sets l_nodelete_active directly,
> + potentially from signal handlers. Initial loading of an
> + DF_1_NODELETE object set l_nodelete_pending. Relocation may
> + set l_nodelete_pending as well. l_nodelete_pending maps are
> + promoted to l_nodelete_active status in the final stages of
> + dlopen, prior to calling ELF constructors. dlclose only
> + refuses to unload l_nodelete_active maps, the pending status is
> + ignored. */
> + bool l_nodelete_active;
> + bool l_nodelete_pending;
> +
> + /* Used for dependency sorting in dlclose/_dl_fini. These need to
> + be writable all the time because there is no way to report an
> + error in _dl_fini. These flags can be moved into struct
> + link_map_private once _dl_fini no longer re-sorts link maps. */
> + bool l_visited;
> + int l_idx;
> +};
> +
> /* Structure describing a loaded shared object. The `l_next' and `l_prev'
> members form a chain of all the shared objects loaded at startup.
>
> @@ -111,6 +176,9 @@ struct link_map
> than one namespace. */
> struct link_map *l_real;
>
> + /* Run-time writable fields. */
> + struct link_map_rw *l_rw;
> +
> /* Number of the namespace this link map belongs to. */
> Lmid_t l_ns;
>
> @@ -170,7 +238,6 @@ struct link_map
> const Elf_Symndx *l_buckets;
> };
>
> - unsigned int l_direct_opencount; /* Reference count for dlopen/dlclose. */
> enum /* Where this object came from. */
> {
> lt_executable, /* The main executable program. */
> @@ -180,12 +247,9 @@ struct link_map
> unsigned int l_dt_relr_ref:1; /* Nonzero if GLIBC_ABI_DT_RELR is
> referenced. */
> unsigned int l_relocated:1; /* Nonzero if object's relocations done. */
> - unsigned int l_init_called:1; /* Nonzero if DT_INIT function called. */
> unsigned int l_global:1; /* Nonzero if object in _dl_global_scope. */
> unsigned int l_reserved:2; /* Reserved for internal use. */
> unsigned int l_main_map:1; /* Nonzero for the map of the main program. */
> - unsigned int l_visited:1; /* Used internally for map dependency
> - graph traversal. */
> unsigned int l_map_used:1; /* These two bits are used during traversal */
> unsigned int l_map_done:1; /* of maps in _dl_close_worker. */
> unsigned int l_phdr_allocated:1; /* Nonzero if the data structure pointed
> @@ -214,18 +278,6 @@ struct link_map
> lt_library map. */
> unsigned int l_tls_in_slotinfo:1; /* TLS slotinfo updated in dlopen. */
>
> - /* NODELETE status of the map. Only valid for maps of type
> - lt_loaded. Lazy binding sets l_nodelete_active directly,
> - potentially from signal handlers. Initial loading of an
> - DF_1_NODELETE object set l_nodelete_pending. Relocation may
> - set l_nodelete_pending as well. l_nodelete_pending maps are
> - promoted to l_nodelete_active status in the final stages of
> - dlopen, prior to calling ELF constructors. dlclose only
> - refuses to unload l_nodelete_active maps, the pending status is
> - ignored. */
> - bool l_nodelete_active;
> - bool l_nodelete_pending;
> -
> #include <link_map.h>
>
> /* Collected information about own RPATH directories. */
> @@ -277,14 +329,6 @@ struct link_map
> /* List of object in order of the init and fini calls. */
> struct link_map **l_initfini;
>
> - /* List of the dependencies introduced through symbol binding. */
> - struct link_map_reldeps
> - {
> - unsigned int act;
> - struct link_map *list[];
> - } *l_reldeps;
> - unsigned int l_reldepsmax;
> -
> /* Nonzero if the DSO is used. */
> unsigned int l_used;
>
> @@ -293,9 +337,6 @@ struct link_map
> ElfW(Word) l_flags_1;
> ElfW(Word) l_flags;
>
> - /* Temporarily used in `dl_close'. */
> - int l_idx;
> -
> struct link_map_machine l_mach;
>
> struct
> @@ -318,28 +359,9 @@ struct link_map
> size_t l_tls_align;
> /* Offset of first byte module alignment. */
> size_t l_tls_firstbyte_offset;
> -#ifndef NO_TLS_OFFSET
> -# define NO_TLS_OFFSET 0
> -#endif
> -#ifndef FORCED_DYNAMIC_TLS_OFFSET
> -# if NO_TLS_OFFSET == 0
> -# define FORCED_DYNAMIC_TLS_OFFSET -1
> -# elif NO_TLS_OFFSET == -1
> -# define FORCED_DYNAMIC_TLS_OFFSET -2
> -# else
> -# error "FORCED_DYNAMIC_TLS_OFFSET is not defined"
> -# endif
> -#endif
> - /* For objects present at startup time: offset in the static TLS block. */
> - ptrdiff_t l_tls_offset;
> /* Index of the module in the dtv array. */
> size_t l_tls_modid;
>
> - /* Number of thread_local objects constructed by this DSO. This is
> - atomically accessed and modified and is not always protected by the load
> - lock. See also: CONCURRENCY NOTES in cxa_thread_atexit_impl.c. */
> - size_t l_tls_dtor_count;
> -
> /* Information used to change permission after the relocations are
> done. */
> ElfW(Addr) l_relro_addr;
> @@ -350,15 +372,16 @@ struct link_map
>
> #include <dl-relocate-ld.h>
>
> -/* Information used by audit modules. For most link maps, this data
> - immediate follows the link map in memory. For the dynamic linker,
> - it is allocated separately. See link_map_audit_state in
> - <ldsodefs.h>. */
> +/* Information used by audit modules. An array of size GLRO (naudit)
> + elements follows the l_rw link map data in memory (in some cases
> + conservatively extended to DL_NNS). */
> struct auditstate
> {
> uintptr_t cookie;
> unsigned int bindflags;
> };
> +_Static_assert (__alignof (struct auditstate) <= __alignof (struct link_map_rw),
> + "auditstate alignment compatible with link_map_rw alignment");
>
>
> /* This is the hidden instance of struct r_debug_extended used by the
> diff --git a/nptl/Versions b/nptl/Versions
> index 3221de89d1..ea1ab9e5a8 100644
> --- a/nptl/Versions
> +++ b/nptl/Versions
> @@ -404,8 +404,9 @@ libc {
> _thread_db_dtv_slotinfo_map;
> _thread_db_dtv_t_counter;
> _thread_db_dtv_t_pointer_val;
> + _thread_db_link_map_l_rw;
> _thread_db_link_map_l_tls_modid;
> - _thread_db_link_map_l_tls_offset;
> + _thread_db_link_map_rw_l_tls_offset;
> _thread_db_list_t_next;
> _thread_db_list_t_prev;
> _thread_db_pthread_cancelhandling;
> diff --git a/nptl_db/db_info.c b/nptl_db/db_info.c
> index fe7a750485..6748c500a6 100644
> --- a/nptl_db/db_info.c
> +++ b/nptl_db/db_info.c
> @@ -38,6 +38,7 @@ typedef struct
> } dtv;
>
> typedef struct link_map link_map;
> +typedef struct link_map_rw link_map_rw;
> typedef struct rtld_global rtld_global;
> typedef struct dtv_slotinfo_list dtv_slotinfo_list;
> typedef struct dtv_slotinfo dtv_slotinfo;
> diff --git a/nptl_db/structs.def b/nptl_db/structs.def
> index 93c76c8c3c..90a0752000 100644
> --- a/nptl_db/structs.def
> +++ b/nptl_db/structs.def
> @@ -93,7 +93,8 @@ DB_STRUCT (pthread_key_data_level2)
> DB_STRUCT_ARRAY_FIELD (pthread_key_data_level2, data)
>
> DB_STRUCT_FIELD (link_map, l_tls_modid)
> -DB_STRUCT_FIELD (link_map, l_tls_offset)
> +DB_STRUCT_FIELD (link_map, l_rw)
> +DB_STRUCT_FIELD (link_map_rw, l_tls_offset)
>
> DB_STRUCT_ARRAY_FIELD (dtv, dtv)
> #define pointer_val pointer.val /* Field of anonymous struct in dtv_t. */
> diff --git a/nptl_db/td_thr_tlsbase.c b/nptl_db/td_thr_tlsbase.c
> index 3e4cdb5ee8..4a7a441e8d 100644
> --- a/nptl_db/td_thr_tlsbase.c
> +++ b/nptl_db/td_thr_tlsbase.c
> @@ -191,9 +191,15 @@ td_thr_tlsbase (const td_thrhandle_t *th,
> /* Is the DTV current enough? */
> if (dtvgen < modgen)
> {
> - try_static_tls:
> - /* If the module uses Static TLS, we're still good. */
> - err = DB_GET_FIELD (temp, th->th_ta_p, map, link_map, l_tls_offset, 0);
> + try_static_tls:;
> + /* If the module uses Static TLS, we're still good. Follow the
> + l_rw pointer to l_tls_offset. */
> + psaddr_t l_rw;
> + err = DB_GET_FIELD (l_rw, th->th_ta_p, map, link_map, l_rw, 0);
> + if (err != TD_OK)
> + return err;
> + err = DB_GET_FIELD (temp, th->th_ta_p, l_rw, link_map_rw,
> + l_tls_offset, 0);
> if (err != TD_OK)
> return err;
> ptrdiff_t tlsoff = (uintptr_t)temp;
> diff --git a/stdlib/cxa_thread_atexit_impl.c b/stdlib/cxa_thread_atexit_impl.c
> index 7e7ac774a4..3e23fbc626 100644
> --- a/stdlib/cxa_thread_atexit_impl.c
> +++ b/stdlib/cxa_thread_atexit_impl.c
> @@ -133,7 +133,7 @@ __cxa_thread_atexit_impl (dtor_func func, void *obj, void *dso_symbol)
> _dl_close_worker is protected by the dl_load_lock. The execution in
> __call_tls_dtors does not really depend on this value beyond the fact that
> it should be atomic, so Relaxed MO should be sufficient. */
> - atomic_fetch_add_relaxed (&lm_cache->l_tls_dtor_count, 1);
> + atomic_fetch_add_relaxed (&lm_cache->l_rw->l_tls_dtor_count, 1);
> __rtld_lock_unlock_recursive (GL(dl_load_lock));
>
> new->map = lm_cache;
> @@ -159,7 +159,7 @@ __call_tls_dtors (void)
> l_tls_dtor_count decrement. That way, we protect this access from a
> potential DSO unload in _dl_close_worker, which happens when
> l_tls_dtor_count is 0. See CONCURRENCY NOTES for more detail. */
> - atomic_fetch_add_release (&cur->map->l_tls_dtor_count, -1);
> + atomic_fetch_add_release (&cur->map->l_rw->l_tls_dtor_count, -1);
> free (cur);
> }
> }
> diff --git a/sysdeps/aarch64/dl-machine.h b/sysdeps/aarch64/dl-machine.h
> index bb8f8a9bb1..266ccc2fa0 100644
> --- a/sysdeps/aarch64/dl-machine.h
> +++ b/sysdeps/aarch64/dl-machine.h
> @@ -249,7 +249,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
> else
> # endif
> {
> - td->arg = (void*)(sym->st_value + sym_map->l_tls_offset
> + td->arg = (void*)(sym->st_value
> + + sym_map->l_rw->l_tls_offset
> + reloc->r_addend);
> td->entry = _dl_tlsdesc_return;
> }
> @@ -274,7 +275,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
> {
> CHECK_STATIC_TLS (map, sym_map);
> *reloc_addr =
> - sym->st_value + reloc->r_addend + sym_map->l_tls_offset;
> + sym->st_value + reloc->r_addend + sym_map->l_rw->l_tls_offset;
> }
> break;
>
> diff --git a/sysdeps/alpha/dl-machine.h b/sysdeps/alpha/dl-machine.h
> index b9de9164c7..eb2dc57518 100644
> --- a/sysdeps/alpha/dl-machine.h
> +++ b/sysdeps/alpha/dl-machine.h
> @@ -401,12 +401,12 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
> else if (r_type == R_ALPHA_TPREL64)
> {
> # ifdef RTLD_BOOTSTRAP
> - *reloc_addr = sym_raw_value + map->l_tls_offset;
> + *reloc_addr = sym_raw_value + map->l_rw->l_tls_offset;
> # else
> if (sym_map)
> {
> CHECK_STATIC_TLS (map, sym_map);
> - *reloc_addr = sym_raw_value + sym_map->l_tls_offset;
> + *reloc_addr = sym_raw_value + sym_map->l_rw->l_tls_offset;
> }
> # endif
> }
> diff --git a/sysdeps/arc/dl-machine.h b/sysdeps/arc/dl-machine.h
> index 044cdf1063..8f825d2a5d 100644
> --- a/sysdeps/arc/dl-machine.h
> +++ b/sysdeps/arc/dl-machine.h
> @@ -284,7 +284,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
> if (sym != NULL)
> {
> CHECK_STATIC_TLS (map, sym_map);
> - *reloc_addr = sym_map->l_tls_offset + sym->st_value + reloc->r_addend;
> + *reloc_addr = (sym_map->l_rw->l_tls_offset + sym->st_value
> + + reloc->r_addend);
> }
> break;
>
> diff --git a/sysdeps/arm/dl-machine.h b/sysdeps/arm/dl-machine.h
> index e597c41348..1fbff8a052 100644
> --- a/sysdeps/arm/dl-machine.h
> +++ b/sysdeps/arm/dl-machine.h
> @@ -394,7 +394,7 @@ elf_machine_rel (struct link_map *map, struct r_scope_elem *scope[],
> # endif
> # endif
> {
> - td->argument.value = value + sym_map->l_tls_offset;
> + td->argument.value = value + sym_map->l_rw->l_tls_offset;
> td->entry = _dl_tlsdesc_return;
> }
> }
> @@ -424,7 +424,7 @@ elf_machine_rel (struct link_map *map, struct r_scope_elem *scope[],
> if (sym != NULL)
> {
> CHECK_STATIC_TLS (map, sym_map);
> - *reloc_addr += sym->st_value + sym_map->l_tls_offset;
> + *reloc_addr += sym->st_value + sym_map->l_rw->l_tls_offset;
> }
> break;
> case R_ARM_IRELATIVE:
> diff --git a/sysdeps/csky/dl-machine.h b/sysdeps/csky/dl-machine.h
> index dd8ff4a647..47a3e90163 100644
> --- a/sysdeps/csky/dl-machine.h
> +++ b/sysdeps/csky/dl-machine.h
> @@ -302,7 +302,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
> if (sym != NULL)
> {
> CHECK_STATIC_TLS (map, sym_map);
> - *reloc_addr = (sym->st_value + sym_map->l_tls_offset
> + *reloc_addr = (sym->st_value + sym_map->l_rw->l_tls_offset
> + reloc->r_addend);
> }
> break;
> diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
> index e8418973ed..6973fe6dbe 100644
> --- a/sysdeps/generic/ldsodefs.h
> +++ b/sysdeps/generic/ldsodefs.h
> @@ -1342,15 +1342,9 @@ is_rtld_link_map (const struct link_map *l)
> static inline struct auditstate *
> link_map_audit_state (struct link_map *l, size_t index)
> {
> - if (is_rtld_link_map (l))
> - /* The auditstate array is stored separately. */
> - return _dl_rtld_auditstate + index;
> - else
> - {
> - /* The auditstate array follows the link map in memory. */
> - struct auditstate *base = (struct auditstate *) (l + 1);
> - return &base[index];
> - }
> + /* The auditstate array follows the read-write link map part in memory. */
> + struct auditstate *base = (struct auditstate *) (l->l_rw + 1);
> + return &base[index];
> }
>
> /* Call the la_objsearch from the audit modules from the link map L. If
> diff --git a/sysdeps/hppa/dl-machine.h b/sysdeps/hppa/dl-machine.h
> index dd2cf0a050..b285fffb00 100644
> --- a/sysdeps/hppa/dl-machine.h
> +++ b/sysdeps/hppa/dl-machine.h
> @@ -715,7 +715,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
> if (sym != NULL)
> {
> CHECK_STATIC_TLS (map, sym_map);
> - value = sym_map->l_tls_offset + sym->st_value + reloc->r_addend;
> + value = (sym_map->l_rw->l_tls_offset + sym->st_value
> + + reloc->r_addend);
> }
> break;
> #endif /* use TLS */
> diff --git a/sysdeps/i386/dl-machine.h b/sysdeps/i386/dl-machine.h
> index 87b77429dd..f8acc5bdd7 100644
> --- a/sysdeps/i386/dl-machine.h
> +++ b/sysdeps/i386/dl-machine.h
> @@ -353,7 +353,8 @@ and creates an unsatisfiable circular dependency.\n",
> # endif
> # endif
> {
> - td->arg = (void*)(sym->st_value - sym_map->l_tls_offset
> + td->arg = (void*)(sym->st_value
> + - sym_map->l_rw->l_tls_offset
> + (ElfW(Word))td->arg);
> td->entry = _dl_tlsdesc_return;
> }
> @@ -363,7 +364,7 @@ and creates an unsatisfiable circular dependency.\n",
> case R_386_TLS_TPOFF32:
> /* The offset is positive, backward from the thread pointer. */
> # ifdef RTLD_BOOTSTRAP
> - *reloc_addr += map->l_tls_offset - sym->st_value;
> + *reloc_addr += map->l_rw->l_tls_offset - sym->st_value;
> # else
> /* We know the offset of object the symbol is contained in.
> It is a positive value which will be subtracted from the
> @@ -372,14 +373,14 @@ and creates an unsatisfiable circular dependency.\n",
> if (sym != NULL)
> {
> CHECK_STATIC_TLS (map, sym_map);
> - *reloc_addr += sym_map->l_tls_offset - sym->st_value;
> + *reloc_addr += sym_map->l_rw->l_tls_offset - sym->st_value;
> }
> # endif
> break;
> case R_386_TLS_TPOFF:
> /* The offset is negative, forward from the thread pointer. */
> # ifdef RTLD_BOOTSTRAP
> - *reloc_addr += sym->st_value - map->l_tls_offset;
> + *reloc_addr += sym->st_value - map->l_rw->l_tls_offset;
> # else
> /* We know the offset of object the symbol is contained in.
> It is a negative value which will be added to the
> @@ -387,7 +388,7 @@ and creates an unsatisfiable circular dependency.\n",
> if (sym != NULL)
> {
> CHECK_STATIC_TLS (map, sym_map);
> - *reloc_addr += sym->st_value - sym_map->l_tls_offset;
> + *reloc_addr += sym->st_value - sym_map->l_rw->l_tls_offset;
> }
> # endif
> break;
> diff --git a/sysdeps/loongarch/dl-tls.h b/sysdeps/loongarch/dl-tls.h
> index b25d599882..e7ee817408 100644
> --- a/sysdeps/loongarch/dl-tls.h
> +++ b/sysdeps/loongarch/dl-tls.h
> @@ -37,7 +37,7 @@ extern void *__tls_get_addr (tls_index *ti);
>
> /* Compute the value for a GOTTPREL reloc. */
> #define TLS_TPREL_VALUE(sym_map, sym) \
> - ((sym_map)->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
> + ((sym_map)->l_rw->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
>
> /* Compute the value for a DTPREL reloc. */
> #define TLS_DTPREL_VALUE(sym) ((sym)->st_value - TLS_DTV_OFFSET)
> diff --git a/sysdeps/m68k/dl-tls.h b/sysdeps/m68k/dl-tls.h
> index 85817fcce9..96a9f5edac 100644
> --- a/sysdeps/m68k/dl-tls.h
> +++ b/sysdeps/m68k/dl-tls.h
> @@ -35,7 +35,7 @@ typedef struct
>
> /* Compute the value for a TPREL reloc. */
> #define TLS_TPREL_VALUE(sym_map, sym, reloc) \
> - ((sym_map)->l_tls_offset + (sym)->st_value + (reloc)->r_addend \
> + ((sym_map)->l_rw->l_tls_offset + (sym)->st_value + (reloc)->r_addend \
> - TLS_TP_OFFSET)
>
> /* Compute the value for a DTPREL reloc. */
> diff --git a/sysdeps/microblaze/dl-machine.h b/sysdeps/microblaze/dl-machine.h
> index f1c4f7c519..a1cf1b66ce 100644
> --- a/sysdeps/microblaze/dl-machine.h
> +++ b/sysdeps/microblaze/dl-machine.h
> @@ -262,7 +262,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
> if (sym != NULL)
> {
> CHECK_STATIC_TLS (map, sym_map);
> - *reloc_addr = sym->st_value + sym_map->l_tls_offset + reloc->r_addend;
> + *reloc_addr = (sym->st_value + sym_map->l_rw->l_tls_offset
> + + reloc->r_addend);
> }
> }
> #endif
> diff --git a/sysdeps/mips/dl-tls.h b/sysdeps/mips/dl-tls.h
> index c1859719f5..1f3adf8401 100644
> --- a/sysdeps/mips/dl-tls.h
> +++ b/sysdeps/mips/dl-tls.h
> @@ -35,7 +35,7 @@ typedef struct
>
> /* Compute the value for a GOTTPREL reloc. */
> #define TLS_TPREL_VALUE(sym_map, sym) \
> - ((sym_map)->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
> + ((sym_map)->l_rw->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
>
> /* Compute the value for a DTPREL reloc. */
> #define TLS_DTPREL_VALUE(sym) \
> diff --git a/sysdeps/or1k/dl-machine.h b/sysdeps/or1k/dl-machine.h
> index c91f55554c..4680232a18 100644
> --- a/sysdeps/or1k/dl-machine.h
> +++ b/sysdeps/or1k/dl-machine.h
> @@ -250,13 +250,13 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
> case R_OR1K_TLS_TPOFF:
> # ifdef RTLD_BOOTSTRAP
> *reloc_addr = sym->st_value + reloc->r_addend +
> - map->l_tls_offset - TLS_TCB_SIZE;
> + map->l_rw->l_tls_offset - TLS_TCB_SIZE;
> # else
> if (sym_map != NULL)
> {
> CHECK_STATIC_TLS (map, sym_map);
> *reloc_addr = sym->st_value + reloc->r_addend +
> - sym_map->l_tls_offset - TLS_TCB_SIZE;
> + sym_map->l_rw->l_tls_offset - TLS_TCB_SIZE;
> }
> # endif
> break;
> diff --git a/sysdeps/powerpc/dl-tls.h b/sysdeps/powerpc/dl-tls.h
> index 52d67a1fa1..8c0b9cbaff 100644
> --- a/sysdeps/powerpc/dl-tls.h
> +++ b/sysdeps/powerpc/dl-tls.h
> @@ -35,7 +35,7 @@ typedef struct
>
> /* Compute the value for a @tprel reloc. */
> #define TLS_TPREL_VALUE(sym_map, sym, reloc) \
> - ((sym_map)->l_tls_offset + (sym)->st_value + (reloc)->r_addend \
> + ((sym_map)->l_rw->l_tls_offset + (sym)->st_value + (reloc)->r_addend \
> - TLS_TP_OFFSET)
>
> /* Compute the value for a @dtprel reloc. */
> diff --git a/sysdeps/powerpc/powerpc32/dl-machine.h b/sysdeps/powerpc/powerpc32/dl-machine.h
> index 9f95b23233..5d69719148 100644
> --- a/sysdeps/powerpc/powerpc32/dl-machine.h
> +++ b/sysdeps/powerpc/powerpc32/dl-machine.h
> @@ -354,7 +354,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
> if (!NOT_BOOTSTRAP)
> {
> reloc_addr[0] = 0;
> - reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
> + reloc_addr[1] = (sym_map->l_rw->l_tls_offset - TLS_TP_OFFSET
> + TLS_DTV_OFFSET);
> break;
> }
> @@ -368,7 +368,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
> {
> reloc_addr[0] = 0;
> /* Set up for local dynamic. */
> - reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
> + reloc_addr[1] = (sym_map->l_rw->l_tls_offset - TLS_TP_OFFSET
> + TLS_DTV_OFFSET);
> break;
> }
> diff --git a/sysdeps/powerpc/powerpc64/dl-machine.h b/sysdeps/powerpc/powerpc64/dl-machine.h
> index d8d7c8b763..116adc079d 100644
> --- a/sysdeps/powerpc/powerpc64/dl-machine.h
> +++ b/sysdeps/powerpc/powerpc64/dl-machine.h
> @@ -748,7 +748,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
> {
> #ifdef RTLD_BOOTSTRAP
> reloc_addr[0] = 0;
> - reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
> + reloc_addr[1] = (sym_map->l_rw->l_tls_offset - TLS_TP_OFFSET
> + TLS_DTV_OFFSET);
> return;
> #else
> @@ -762,7 +762,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
> {
> reloc_addr[0] = 0;
> /* Set up for local dynamic. */
> - reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
> + reloc_addr[1] = (sym_map->l_rw->l_tls_offset - TLS_TP_OFFSET
> + TLS_DTV_OFFSET);
> return;
> }
> diff --git a/sysdeps/riscv/dl-tls.h b/sysdeps/riscv/dl-tls.h
> index b8931a0fa5..6d6ccf88a6 100644
> --- a/sysdeps/riscv/dl-tls.h
> +++ b/sysdeps/riscv/dl-tls.h
> @@ -35,7 +35,7 @@ typedef struct
>
> /* Compute the value for a GOTTPREL reloc. */
> #define TLS_TPREL_VALUE(sym_map, sym) \
> - ((sym_map)->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
> + ((sym_map)->l_rw->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
>
> /* Compute the value for a DTPREL reloc. */
> #define TLS_DTPREL_VALUE(sym) \
> diff --git a/sysdeps/s390/s390-32/dl-machine.h b/sysdeps/s390/s390-32/dl-machine.h
> index d317f679d1..a0e008f459 100644
> --- a/sysdeps/s390/s390-32/dl-machine.h
> +++ b/sysdeps/s390/s390-32/dl-machine.h
> @@ -339,7 +339,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
> case R_390_TLS_TPOFF:
> /* The offset is negative, forward from the thread pointer. */
> #ifdef RTLD_BOOTSTRAP
> - *reloc_addr = sym->st_value + reloc->r_addend - map->l_tls_offset;
> + *reloc_addr = (sym->st_value + reloc->r_addend
> + - map->l_rw->l_tls_offset);
> #else
> /* We know the offset of the object the symbol is contained in.
> It is a negative value which will be added to the
> @@ -348,7 +349,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
> {
> CHECK_STATIC_TLS (map, sym_map);
> *reloc_addr = (sym->st_value + reloc->r_addend
> - - sym_map->l_tls_offset);
> + - sym_map->l_rw->l_tls_offset);
> }
> #endif
> break;
> diff --git a/sysdeps/s390/s390-64/dl-machine.h b/sysdeps/s390/s390-64/dl-machine.h
> index d6028630b7..5900d12332 100644
> --- a/sysdeps/s390/s390-64/dl-machine.h
> +++ b/sysdeps/s390/s390-64/dl-machine.h
> @@ -321,7 +321,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
> case R_390_TLS_TPOFF:
> /* The offset is negative, forward from the thread pointer. */
> #ifdef RTLD_BOOTSTRAP
> - *reloc_addr = sym->st_value + reloc->r_addend - map->l_tls_offset;
> + *reloc_addr = (sym->st_value + reloc->r_addend
> + - map->l_rw->l_tls_offset);
> #else
> /* We know the offset of the object the symbol is contained in.
> It is a negative value which will be added to the
> @@ -330,7 +331,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
> {
> CHECK_STATIC_TLS (map, sym_map);
> *reloc_addr = (sym->st_value + reloc->r_addend
> - - sym_map->l_tls_offset);
> + - sym_map->l_rw->l_tls_offset);
> }
> #endif
> break;
> diff --git a/sysdeps/sh/dl-machine.h b/sysdeps/sh/dl-machine.h
> index 2c07474bb4..e93431d107 100644
> --- a/sysdeps/sh/dl-machine.h
> +++ b/sysdeps/sh/dl-machine.h
> @@ -363,7 +363,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
> case R_SH_TLS_TPOFF32:
> /* The offset is positive, afterward from the thread pointer. */
> #ifdef RTLD_BOOTSTRAP
> - *reloc_addr = map->l_tls_offset + sym->st_value + reloc->r_addend;
> + *reloc_addr = (map->l_rw->l_tls_offset + sym->st_value
> + + reloc->r_addend);
> #else
> /* We know the offset of object the symbol is contained in.
> It is a positive value which will be added to the thread
> @@ -372,8 +373,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
> if (sym != NULL)
> {
> CHECK_STATIC_TLS (map, sym_map);
> - *reloc_addr = sym_map->l_tls_offset + sym->st_value
> - + reloc->r_addend;
> + *reloc_addr = (sym_map->l_rw->l_tls_offset + sym->st_value
> + + reloc->r_addend);
> }
> #endif
> break;
> diff --git a/sysdeps/sparc/sparc32/dl-machine.h b/sysdeps/sparc/sparc32/dl-machine.h
> index 0b49766801..130db5aef0 100644
> --- a/sysdeps/sparc/sparc32/dl-machine.h
> +++ b/sysdeps/sparc/sparc32/dl-machine.h
> @@ -371,7 +371,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
> if (sym != NULL)
> {
> CHECK_STATIC_TLS (map, sym_map);
> - *reloc_addr = sym->st_value - sym_map->l_tls_offset
> + *reloc_addr = sym->st_value - sym_map->l_rw->l_tls_offset
> + reloc->r_addend;
> }
> break;
> @@ -381,7 +381,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
> if (sym != NULL)
> {
> CHECK_STATIC_TLS (map, sym_map);
> - value = sym->st_value - sym_map->l_tls_offset
> + value = sym->st_value - sym_map->l_rw->l_tls_offset
> + reloc->r_addend;
> if (r_type == R_SPARC_TLS_LE_HIX22)
> *reloc_addr = (*reloc_addr & 0xffc00000) | ((~value) >> 10);
> diff --git a/sysdeps/sparc/sparc64/dl-machine.h b/sysdeps/sparc/sparc64/dl-machine.h
> index b1ccf2320c..2309eea151 100644
> --- a/sysdeps/sparc/sparc64/dl-machine.h
> +++ b/sysdeps/sparc/sparc64/dl-machine.h
> @@ -387,7 +387,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
> if (sym != NULL)
> {
> CHECK_STATIC_TLS (map, sym_map);
> - *reloc_addr = sym->st_value - sym_map->l_tls_offset
> + *reloc_addr = sym->st_value - sym_map->l_rw->l_tls_offset
> + reloc->r_addend;
> }
> break;
> @@ -397,7 +397,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
> if (sym != NULL)
> {
> CHECK_STATIC_TLS (map, sym_map);
> - value = sym->st_value - sym_map->l_tls_offset
> + value = sym->st_value - sym_map->l_rw->l_tls_offset
> + reloc->r_addend;
> if (r_type == R_SPARC_TLS_LE_HIX22)
> *(unsigned int *)reloc_addr =
> diff --git a/sysdeps/x86/dl-prop.h b/sysdeps/x86/dl-prop.h
> index 8625751427..87033831f2 100644
> --- a/sysdeps/x86/dl-prop.h
> +++ b/sysdeps/x86/dl-prop.h
> @@ -40,7 +40,7 @@ dl_isa_level_check (struct link_map *m, const char *program)
> l = m->l_initfini[i];
>
> /* Skip ISA level check if functions have been executed. */
> - if (l->l_init_called)
> + if (l->l_rw->l_init_called)
> continue;
>
> #ifdef SHARED
> diff --git a/sysdeps/x86_64/dl-machine.h b/sysdeps/x86_64/dl-machine.h
> index 572a1a7395..681e2bc482 100644
> --- a/sysdeps/x86_64/dl-machine.h
> +++ b/sysdeps/x86_64/dl-machine.h
> @@ -383,7 +383,8 @@ and creates an unsatisfiable circular dependency.\n",
> else
> # endif
> {
> - td->arg = (void*)(sym->st_value - sym_map->l_tls_offset
> + td->arg = (void*)(sym->st_value
> + - sym_map->l_rw->l_tls_offset
> + reloc->r_addend);
> td->entry = _dl_tlsdesc_return;
> }
> @@ -399,7 +400,7 @@ and creates an unsatisfiable circular dependency.\n",
> It is a negative value which will be added to the
> thread pointer. */
> value = (sym->st_value + reloc->r_addend
> - - sym_map->l_tls_offset);
> + - sym_map->l_rw->l_tls_offset);
> # ifdef __ILP32__
> /* The symbol and addend values are 32 bits but the GOT
> entry is 64 bits wide and the whole 64-bit entry is used
@@ -29,7 +29,8 @@ check_loaded_objects (const char **loaded)
for (lm = MAPS; lm; lm = lm->l_next)
{
if (lm->l_name && lm->l_name[0])
- printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
+ printf(" %s, count = %d\n", lm->l_name,
+ (int) lm->l_rw->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_name)
{
int match = 0;
@@ -29,7 +29,7 @@ _dl_call_fini (void *closure_map)
_dl_debug_printf ("\ncalling fini: %s [%lu]\n\n", map->l_name, map->l_ns);
/* Make sure nothing happens if we are called twice. */
- map->l_init_called = 0;
+ map->l_rw->l_init_called = 0;
ElfW(Dyn) *fini_array = map->l_info[DT_FINI_ARRAY];
if (fini_array != NULL)
@@ -109,23 +109,23 @@ void
_dl_close_worker (struct link_map *map, bool force)
{
/* One less direct use. */
- --map->l_direct_opencount;
+ --map->l_rw->l_direct_opencount;
/* If _dl_close is called recursively (some destructor call dlclose),
just record that the parent _dl_close will need to do garbage collection
again and return. */
static enum { not_pending, pending, rerun } dl_close_state;
- if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
+ if (map->l_rw->l_direct_opencount > 0 || map->l_type != lt_loaded
|| dl_close_state != not_pending)
{
- if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
+ if (map->l_rw->l_direct_opencount == 0 && map->l_type == lt_loaded)
dl_close_state = rerun;
/* There are still references to this object. Do nothing more. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
- map->l_name, map->l_direct_opencount);
+ map->l_name, map->l_rw->l_direct_opencount);
return;
}
@@ -147,7 +147,7 @@ _dl_close_worker (struct link_map *map, bool force)
{
l->l_map_used = 0;
l->l_map_done = 0;
- l->l_idx = idx;
+ l->l_rw->l_idx = idx;
maps[idx] = l;
++idx;
}
@@ -157,10 +157,10 @@ _dl_close_worker (struct link_map *map, bool force)
The map variable is NULL after a retry. */
if (map != NULL)
{
- maps[map->l_idx] = maps[0];
- maps[map->l_idx]->l_idx = map->l_idx;
+ maps[map->l_rw->l_idx] = maps[0];
+ maps[map->l_rw->l_idx]->l_rw->l_idx = map->l_rw->l_idx;
maps[0] = map;
- maps[0]->l_idx = 0;
+ maps[0]->l_rw->l_idx = 0;
}
/* Keep track of the lowest index link map we have covered already. */
@@ -175,11 +175,11 @@ _dl_close_worker (struct link_map *map, bool force)
/* Check whether this object is still used. */
if (l->l_type == lt_loaded
- && l->l_direct_opencount == 0
- && !l->l_nodelete_active
+ && l->l_rw->l_direct_opencount == 0
+ && !l->l_rw->l_nodelete_active
/* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
acquire is sufficient and correct. */
- && atomic_load_acquire (&l->l_tls_dtor_count) == 0
+ && atomic_load_acquire (&l->l_rw->l_tls_dtor_count) == 0
&& !l->l_map_used)
continue;
@@ -187,7 +187,7 @@ _dl_close_worker (struct link_map *map, bool force)
l->l_map_used = 1;
l->l_map_done = 1;
/* Signal the object is still needed. */
- l->l_idx = IDX_STILL_USED;
+ l->l_rw->l_idx = IDX_STILL_USED;
/* Mark all dependencies as used. */
if (l->l_initfini != NULL)
@@ -197,9 +197,10 @@ _dl_close_worker (struct link_map *map, bool force)
struct link_map **lp = &l->l_initfini[1];
while (*lp != NULL)
{
- if ((*lp)->l_idx != IDX_STILL_USED)
+ if ((*lp)->l_rw->l_idx != IDX_STILL_USED)
{
- assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);
+ assert ((*lp)->l_rw->l_idx >= 0
+ && (*lp)->l_rw->l_idx < nloaded);
if (!(*lp)->l_map_used)
{
@@ -208,8 +209,8 @@ _dl_close_worker (struct link_map *map, bool force)
already processed it, then we need to go back
and process again from that point forward to
ensure we keep all of its dependencies also. */
- if ((*lp)->l_idx - 1 < done_index)
- done_index = (*lp)->l_idx - 1;
+ if ((*lp)->l_rw->l_idx - 1 < done_index)
+ done_index = (*lp)->l_rw->l_idx - 1;
}
}
@@ -217,20 +218,20 @@ _dl_close_worker (struct link_map *map, bool force)
}
}
/* And the same for relocation dependencies. */
- if (l->l_reldeps != NULL)
- for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
+ if (l->l_rw->l_reldeps != NULL)
+ for (unsigned int j = 0; j < l->l_rw->l_reldeps->act; ++j)
{
- struct link_map *jmap = l->l_reldeps->list[j];
+ struct link_map *jmap = l->l_rw->l_reldeps->list[j];
- if (jmap->l_idx != IDX_STILL_USED)
+ if (jmap->l_rw->l_idx != IDX_STILL_USED)
{
- assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);
+ assert (jmap->l_rw->l_idx >= 0 && jmap->l_rw->l_idx < nloaded);
if (!jmap->l_map_used)
{
jmap->l_map_used = 1;
- if (jmap->l_idx - 1 < done_index)
- done_index = jmap->l_idx - 1;
+ if (jmap->l_rw->l_idx - 1 < done_index)
+ done_index = jmap->l_rw->l_idx - 1;
}
}
}
@@ -255,12 +256,12 @@ _dl_close_worker (struct link_map *map, bool force)
if (!imap->l_map_used)
{
- assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);
+ assert (imap->l_type == lt_loaded && !imap->l_rw->l_nodelete_active);
/* Call its termination function. Do not do it for
half-cooked objects. Temporarily disable exception
handling, so that errors are fatal. */
- if (imap->l_init_called)
+ if (imap->l_rw->l_init_called)
_dl_catch_exception (NULL, _dl_call_fini, imap);
#ifdef SHARED
@@ -327,7 +328,7 @@ _dl_close_worker (struct link_map *map, bool force)
((char *) imap->l_scope[cnt]
- offsetof (struct link_map, l_searchlist));
assert (tmap->l_ns == nsid);
- if (tmap->l_idx == IDX_STILL_USED)
+ if (tmap->l_rw->l_idx == IDX_STILL_USED)
++remain;
else
removed_any = true;
@@ -372,7 +373,7 @@ _dl_close_worker (struct link_map *map, bool force)
struct link_map *tmap = (struct link_map *)
((char *) imap->l_scope[cnt]
- offsetof (struct link_map, l_searchlist));
- if (tmap->l_idx != IDX_STILL_USED)
+ if (tmap->l_rw->l_idx != IDX_STILL_USED)
{
/* Remove the scope. Or replace with own map's
scope. */
@@ -417,7 +418,7 @@ _dl_close_worker (struct link_map *map, bool force)
/* The loader is gone, so mark the object as not having one.
Note: l_idx != IDX_STILL_USED -> object will be removed. */
if (imap->l_loader != NULL
- && imap->l_loader->l_idx != IDX_STILL_USED)
+ && imap->l_loader->l_rw->l_idx != IDX_STILL_USED)
imap->l_loader = NULL;
/* Remember where the first dynamically loaded object is. */
@@ -507,14 +508,14 @@ _dl_close_worker (struct link_map *map, bool force)
if (GL(dl_tls_dtv_slotinfo_list) != NULL
&& ! remove_slotinfo (imap->l_tls_modid,
GL(dl_tls_dtv_slotinfo_list), 0,
- imap->l_init_called))
+ imap->l_rw->l_init_called))
/* All dynamically loaded modules with TLS are unloaded. */
/* Can be read concurrently. */
atomic_store_relaxed (&GL(dl_tls_max_dtv_idx),
GL(dl_tls_static_nelem));
- if (imap->l_tls_offset != NO_TLS_OFFSET
- && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
+ if (imap->l_rw->l_tls_offset != NO_TLS_OFFSET
+ && imap->l_rw->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
{
/* Collect a contiguous chunk built from the objects in
this search list, going in either direction. When the
@@ -522,19 +523,19 @@ _dl_close_worker (struct link_map *map, bool force)
reclaim it. */
#if TLS_TCB_AT_TP
if (tls_free_start == NO_TLS_OFFSET
- || (size_t) imap->l_tls_offset == tls_free_start)
+ || (size_t) imap->l_rw->l_tls_offset == tls_free_start)
{
/* Extend the contiguous chunk being reclaimed. */
tls_free_start
- = imap->l_tls_offset - imap->l_tls_blocksize;
+ = imap->l_rw->l_tls_offset - imap->l_tls_blocksize;
if (tls_free_end == NO_TLS_OFFSET)
- tls_free_end = imap->l_tls_offset;
+ tls_free_end = imap->l_rw->l_tls_offset;
}
- else if (imap->l_tls_offset - imap->l_tls_blocksize
+ else if (imap->l_rw->l_tls_offset - imap->l_tls_blocksize
== tls_free_end)
/* Extend the chunk backwards. */
- tls_free_end = imap->l_tls_offset;
+ tls_free_end = imap->l_rw->l_tls_offset;
else
{
/* This isn't contiguous with the last chunk freed.
@@ -543,19 +544,20 @@ _dl_close_worker (struct link_map *map, bool force)
if (tls_free_end == GL(dl_tls_static_used))
{
GL(dl_tls_static_used) = tls_free_start;
- tls_free_end = imap->l_tls_offset;
+ tls_free_end = imap->l_rw->l_tls_offset;
tls_free_start
= tls_free_end - imap->l_tls_blocksize;
}
- else if ((size_t) imap->l_tls_offset
+ else if ((size_t) imap->l_rw->l_tls_offset
== GL(dl_tls_static_used))
GL(dl_tls_static_used)
- = imap->l_tls_offset - imap->l_tls_blocksize;
- else if (tls_free_end < (size_t) imap->l_tls_offset)
+ = imap->l_rw->l_tls_offset - imap->l_tls_blocksize;
+ else if (tls_free_end
+ < (size_t) imap->l_rw->l_tls_offset)
{
/* We pick the later block. It has a chance to
be freed. */
- tls_free_end = imap->l_tls_offset;
+ tls_free_end = imap->l_rw->l_tls_offset;
tls_free_start
= tls_free_end - imap->l_tls_blocksize;
}
@@ -564,34 +566,37 @@ _dl_close_worker (struct link_map *map, bool force)
if (tls_free_start == NO_TLS_OFFSET)
{
tls_free_start = imap->l_tls_firstbyte_offset;
- tls_free_end = (imap->l_tls_offset
+ tls_free_end = (imap->l_rw->l_tls_offset
+ imap->l_tls_blocksize);
}
else if (imap->l_tls_firstbyte_offset == tls_free_end)
/* Extend the contiguous chunk being reclaimed. */
- tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
- else if (imap->l_tls_offset + imap->l_tls_blocksize
+ tls_free_end = (imap->l_rw->l_tls_offset
+ + imap->l_tls_blocksize);
+ else if (imap->l_rw->l_tls_offset + imap->l_tls_blocksize
== tls_free_start)
/* Extend the chunk backwards. */
tls_free_start = imap->l_tls_firstbyte_offset;
/* This isn't contiguous with the last chunk freed.
One of them will be leaked unless we can free
one block right away. */
- else if (imap->l_tls_offset + imap->l_tls_blocksize
+ else if (imap->l_rw->l_tls_offset + imap->l_tls_blocksize
== GL(dl_tls_static_used))
GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
else if (tls_free_end == GL(dl_tls_static_used))
{
GL(dl_tls_static_used) = tls_free_start;
tls_free_start = imap->l_tls_firstbyte_offset;
- tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
+ tls_free_end = (imap->l_rw->l_tls_offset
+ + imap->l_tls_blocksize);
}
else if (tls_free_end < imap->l_tls_firstbyte_offset)
{
/* We pick the later block. It has a chance to
be freed. */
tls_free_start = imap->l_tls_firstbyte_offset;
- tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
+ tls_free_end = (imap->l_rw->l_tls_offset
+ + imap->l_tls_blocksize);
}
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
@@ -663,7 +668,8 @@ _dl_close_worker (struct link_map *map, bool force)
if (imap->l_origin != (char *) -1)
free ((char *) imap->l_origin);
- free (imap->l_reldeps);
+ free (imap->l_rw->l_reldeps);
+ free (imap->l_rw);
/* Print debugging message. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
@@ -769,7 +775,7 @@ _dl_close (void *_map)
before we took the lock. There is no way to detect this (see below)
so we proceed assuming this isn't the case. First see whether we
can remove the object at all. */
- if (__glibc_unlikely (map->l_nodelete_active))
+ if (__glibc_unlikely (map->l_rw->l_nodelete_active))
{
/* Nope. Do nothing. */
__rtld_lock_unlock_recursive (GL(dl_load_lock));
@@ -786,7 +792,7 @@ _dl_close (void *_map)
should be a detectable case and given that dlclose should be threadsafe
we need this to be a reliable detection.
This is bug 20990. */
- if (__builtin_expect (map->l_direct_opencount, 1) == 0)
+ if (__builtin_expect (map->l_rw->l_direct_opencount, 1) == 0)
{
__rtld_lock_unlock_recursive (GL(dl_load_lock));
_dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
@@ -478,20 +478,20 @@ _dl_map_object_deps (struct link_map *map,
/* Maybe we can remove some relocation dependencies now. */
struct link_map_reldeps *l_reldeps = NULL;
- if (map->l_reldeps != NULL)
+ if (map->l_rw->l_reldeps != NULL)
{
for (i = 0; i < nlist; ++i)
map->l_searchlist.r_list[i]->l_reserved = 1;
/* Avoid removing relocation dependencies of the main binary. */
map->l_reserved = 0;
- struct link_map **list = &map->l_reldeps->list[0];
- for (i = 0; i < map->l_reldeps->act; ++i)
+ struct link_map **list = &map->l_rw->l_reldeps->list[0];
+ for (i = 0; i < map->l_rw->l_reldeps->act; ++i)
if (list[i]->l_reserved)
{
/* Need to allocate new array of relocation dependencies. */
l_reldeps = malloc (sizeof (*l_reldeps)
- + map->l_reldepsmax
+ + map->l_rw->l_reldepsmax
* sizeof (struct link_map *));
if (l_reldeps == NULL)
/* Bad luck, keep the reldeps duplicated between
@@ -502,7 +502,7 @@ _dl_map_object_deps (struct link_map *map,
unsigned int j = i;
memcpy (&l_reldeps->list[0], &list[0],
i * sizeof (struct link_map *));
- for (i = i + 1; i < map->l_reldeps->act; ++i)
+ for (i = i + 1; i < map->l_rw->l_reldeps->act; ++i)
if (!list[i]->l_reserved)
l_reldeps->list[j++] = list[i];
l_reldeps->act = j;
@@ -547,8 +547,8 @@ _dl_map_object_deps (struct link_map *map,
if (l_reldeps != NULL)
{
atomic_write_barrier ();
- void *old_l_reldeps = map->l_reldeps;
- map->l_reldeps = l_reldeps;
+ void *old_l_reldeps = map->l_rw->l_reldeps;
+ map->l_rw->l_reldeps = l_reldeps;
_dl_scope_free (old_l_reldeps);
}
if (old_l_initfini != NULL)
@@ -508,7 +508,7 @@ _dlfo_process_initial (void)
if (l != main_map && l == l->l_real)
{
/* lt_library link maps are implicitly NODELETE. */
- if (l->l_type == lt_library || l->l_nodelete_active)
+ if (l->l_type == lt_library || l->l_rw->l_nodelete_active)
{
if (_dlfo_nodelete_mappings != NULL)
/* Second pass only. */
@@ -78,12 +78,12 @@ _dl_fini (void)
assert (i < nloaded);
maps[i] = l;
- l->l_idx = i;
+ l->l_rw->l_idx = i;
++i;
/* Bump l_direct_opencount of all objects so that they
are not dlclose()ed from underneath us. */
- ++l->l_direct_opencount;
+ ++l->l_rw->l_direct_opencount;
}
else
/* Used below to call la_objclose for the ld.so proxy
@@ -115,7 +115,7 @@ _dl_fini (void)
{
struct link_map *l = maps[i];
- if (l->l_init_called)
+ if (l->l_rw->l_init_called)
{
_dl_call_fini (l);
#ifdef SHARED
@@ -125,7 +125,7 @@ _dl_fini (void)
}
/* Correct the previous increment. */
- --l->l_direct_opencount;
+ --l->l_rw->l_direct_opencount;
}
if (proxy_link_map != NULL)
@@ -34,13 +34,13 @@ call_init (struct link_map *l, int argc, char **argv, char **env)
need relocation.) */
assert (l->l_relocated || l->l_type == lt_executable);
- if (l->l_init_called)
+ if (l->l_rw->l_init_called)
/* This object is all done. */
return;
/* Avoid handling this constructor again in case we have a circular
dependency. */
- l->l_init_called = 1;
+ l->l_rw->l_init_called = 1;
/* Check for object which constructors we do not run here. */
if (__builtin_expect (l->l_name[0], 'a') == '\0'
@@ -175,9 +175,9 @@ static void
mark_nodelete (struct link_map *map, int flags)
{
if (flags & DL_LOOKUP_FOR_RELOCATE)
- map->l_nodelete_pending = true;
+ map->l_rw->l_nodelete_pending = true;
else
- map->l_nodelete_active = true;
+ map->l_rw->l_nodelete_active = true;
}
/* Return true if MAP is marked as NODELETE according to the lookup
@@ -187,8 +187,8 @@ is_nodelete (struct link_map *map, int flags)
{
/* Non-pending NODELETE always counts. Pending NODELETE only counts
during initial relocation processing. */
- return map->l_nodelete_active
- || ((flags & DL_LOOKUP_FOR_RELOCATE) && map->l_nodelete_pending);
+ return map->l_rw->l_nodelete_active
+ || ((flags & DL_LOOKUP_FOR_RELOCATE) && map->l_rw->l_nodelete_pending);
}
/* Utility function for do_lookup_x. Lookup an STB_GNU_UNIQUE symbol
@@ -532,7 +532,7 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
return 0;
struct link_map_reldeps *l_reldeps
- = atomic_forced_read (undef_map->l_reldeps);
+ = atomic_forced_read (undef_map->l_rw->l_reldeps);
/* Make sure l_reldeps is read before l_initfini. */
atomic_read_barrier ();
@@ -591,22 +591,22 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
/* Redo the l_reldeps check if undef_map's l_reldeps changed in
the mean time. */
- if (undef_map->l_reldeps != NULL)
+ if (undef_map->l_rw->l_reldeps != NULL)
{
- if (undef_map->l_reldeps != l_reldeps)
+ if (undef_map->l_rw->l_reldeps != l_reldeps)
{
- struct link_map **list = &undef_map->l_reldeps->list[0];
- l_reldepsact = undef_map->l_reldeps->act;
+ struct link_map **list = &undef_map->l_rw->l_reldeps->list[0];
+ l_reldepsact = undef_map->l_rw->l_reldeps->act;
for (i = 0; i < l_reldepsact; ++i)
if (list[i] == map)
goto out_check;
}
- else if (undef_map->l_reldeps->act > l_reldepsact)
+ else if (undef_map->l_rw->l_reldeps->act > l_reldepsact)
{
struct link_map **list
- = &undef_map->l_reldeps->list[0];
+ = &undef_map->l_rw->l_reldeps->list[0];
i = l_reldepsact;
- l_reldepsact = undef_map->l_reldeps->act;
+ l_reldepsact = undef_map->l_rw->l_reldeps->act;
for (; i < l_reldepsact; ++i)
if (list[i] == map)
goto out_check;
@@ -662,14 +662,14 @@ marking %s [%lu] as NODELETE due to reference from %s [%lu]\n",
}
/* Add the reference now. */
- if (__glibc_unlikely (l_reldepsact >= undef_map->l_reldepsmax))
+ if (__glibc_unlikely (l_reldepsact >= undef_map->l_rw->l_reldepsmax))
{
/* Allocate more memory for the dependency list. Since this
can never happen during the startup phase we can use
`realloc'. */
struct link_map_reldeps *newp;
- unsigned int max
- = undef_map->l_reldepsmax ? undef_map->l_reldepsmax * 2 : 10;
+ unsigned int max = (undef_map->l_rw->l_reldepsmax
+ ? undef_map->l_rw->l_reldepsmax * 2 : 10);
#ifdef RTLD_PREPARE_FOREIGN_CALL
RTLD_PREPARE_FOREIGN_CALL;
@@ -696,23 +696,23 @@ marking %s [%lu] as NODELETE due to memory allocation failure\n",
else
{
if (l_reldepsact)
- memcpy (&newp->list[0], &undef_map->l_reldeps->list[0],
+ memcpy (&newp->list[0], &undef_map->l_rw->l_reldeps->list[0],
l_reldepsact * sizeof (struct link_map *));
newp->list[l_reldepsact] = map;
newp->act = l_reldepsact + 1;
atomic_write_barrier ();
- void *old = undef_map->l_reldeps;
- undef_map->l_reldeps = newp;
- undef_map->l_reldepsmax = max;
+ void *old = undef_map->l_rw->l_reldeps;
+ undef_map->l_rw->l_reldeps = newp;
+ undef_map->l_rw->l_reldepsmax = max;
if (old)
_dl_scope_free (old);
}
}
else
{
- undef_map->l_reldeps->list[l_reldepsact] = map;
+ undef_map->l_rw->l_reldeps->list[l_reldepsact] = map;
atomic_write_barrier ();
- undef_map->l_reldeps->act = l_reldepsact + 1;
+ undef_map->l_rw->l_reldeps->act = l_reldepsact + 1;
}
/* Display information if we are debugging. */
@@ -89,15 +89,20 @@ _dl_new_object (char *realname, const char *libname, int type,
# define audit_space 0
#endif
- new = (struct link_map *) calloc (sizeof (*new) + audit_space
- + sizeof (struct link_map *)
- + sizeof (*newname) + libname_len, 1);
+ new = calloc (sizeof (*new)
+ + sizeof (struct link_map_private *)
+ + sizeof (*newname) + libname_len, 1);
if (new == NULL)
return NULL;
+ new->l_rw = calloc (1, sizeof (*new->l_rw) + audit_space);
+ if (new->l_rw == NULL)
+ {
+ free (new);
+ return NULL;
+ }
new->l_real = new;
- new->l_symbolic_searchlist.r_list = (struct link_map **) ((char *) (new + 1)
- + audit_space);
+ new->l_symbolic_searchlist.r_list = (struct link_map **) ((char *) (new + 1));
new->l_libname = newname
= (struct libname_list *) (new->l_symbolic_searchlist.r_list + 1);
@@ -131,7 +136,7 @@ _dl_new_object (char *realname, const char *libname, int type,
new->l_used = 1;
new->l_loader = loader;
#if NO_TLS_OFFSET != 0
- new->l_tls_offset = NO_TLS_OFFSET;
+ new->l_rw->l_tls_offset = NO_TLS_OFFSET;
#endif
new->l_ns = nsid;
@@ -261,7 +261,7 @@ resize_scopes (struct link_map *new)
/* If the initializer has been called already, the object has
not been loaded here and now. */
- if (imap->l_init_called && imap->l_type == lt_loaded)
+ if (imap->l_rw->l_init_called && imap->l_type == lt_loaded)
{
if (scope_has_map (imap, new))
/* Avoid duplicates. */
@@ -325,7 +325,7 @@ update_scopes (struct link_map *new)
struct link_map *imap = new->l_searchlist.r_list[i];
int from_scope = 0;
- if (imap->l_init_called && imap->l_type == lt_loaded)
+ if (imap->l_rw->l_init_called && imap->l_type == lt_loaded)
{
if (scope_has_map (imap, new))
/* Avoid duplicates. */
@@ -424,7 +424,7 @@ activate_nodelete (struct link_map *new)
NODELETE status for objects outside the local scope. */
for (struct link_map *l = GL (dl_ns)[new->l_ns]._ns_loaded; l != NULL;
l = l->l_next)
- if (l->l_nodelete_pending)
+ if (l->l_rw->l_nodelete_pending)
{
if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("activating NODELETE for %s [%lu]\n",
@@ -433,11 +433,11 @@ activate_nodelete (struct link_map *new)
/* The flag can already be true at this point, e.g. a signal
handler may have triggered lazy binding and set NODELETE
status immediately. */
- l->l_nodelete_active = true;
+ l->l_rw->l_nodelete_active = true;
/* This is just a debugging aid, to indicate that
activate_nodelete has run for this map. */
- l->l_nodelete_pending = false;
+ l->l_rw->l_nodelete_pending = false;
}
}
@@ -476,7 +476,7 @@ _dl_open_relocate_one_object (struct dl_open_args *args, struct r_debug *r,
_dl_start_profile ();
/* Prevent unloading the object. */
- GL(dl_profile_map)->l_nodelete_active = true;
+ GL(dl_profile_map)->l_rw->l_nodelete_active = true;
}
}
else
@@ -505,7 +505,7 @@ is_already_fully_open (struct link_map *map, int mode)
/* The object is already in the global scope if requested. */
&& (!(mode & RTLD_GLOBAL) || map->l_global)
/* The object is already NODELETE if requested. */
- && (!(mode & RTLD_NODELETE) || map->l_nodelete_active));
+ && (!(mode & RTLD_NODELETE) || map->l_rw->l_nodelete_active));
}
static void
@@ -547,7 +547,7 @@ dl_open_worker_begin (void *a)
return;
/* This object is directly loaded. */
- ++new->l_direct_opencount;
+ ++new->l_rw->l_direct_opencount;
/* It was already open. See is_already_fully_open above. */
if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
@@ -555,7 +555,8 @@ dl_open_worker_begin (void *a)
/* Let the user know about the opencount. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
- new->l_name, new->l_ns, new->l_direct_opencount);
+ new->l_name, new->l_ns,
+ new->l_rw->l_direct_opencount);
#ifdef SHARED
/* No relocation processing on this execution path. But
@@ -576,10 +577,10 @@ dl_open_worker_begin (void *a)
if (__glibc_unlikely (mode & RTLD_NODELETE))
{
if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES)
- && !new->l_nodelete_active)
+ && !new->l_rw->l_nodelete_active)
_dl_debug_printf ("marking %s [%lu] as NODELETE\n",
new->l_name, new->l_ns);
- new->l_nodelete_active = true;
+ new->l_rw->l_nodelete_active = true;
}
/* Finalize the addition to the global scope. */
@@ -592,7 +593,7 @@ dl_open_worker_begin (void *a)
/* Schedule NODELETE marking for the directly loaded object if
requested. */
if (__glibc_unlikely (mode & RTLD_NODELETE))
- new->l_nodelete_pending = true;
+ new->l_rw->l_nodelete_pending = true;
/* Load that object's dependencies. */
_dl_map_object_deps (new, NULL, 0, 0,
@@ -795,7 +796,7 @@ dl_open_worker (void *a)
/* Let the user know about the opencount. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
- new->l_name, new->l_ns, new->l_direct_opencount);
+ new->l_name, new->l_ns, new->l_rw->l_direct_opencount);
}
void *
@@ -881,7 +882,7 @@ no more namespaces available for dlmopen()"));
if (is_already_fully_open (args.map, mode))
{
/* We can use the fast path. */
- ++args.map->l_direct_opencount;
+ ++args.map->l_rw->l_direct_opencount;
__rtld_lock_unlock_recursive (GL(dl_load_lock));
return args.map;
}
@@ -41,7 +41,7 @@
dynamically loaded. This can only work if there is enough surplus in
the static TLS area already allocated for each running thread. If this
object's TLS segment is too big to fit, we fail with -1. If it fits,
- we set MAP->l_tls_offset and return 0.
+ we set MAP->l_rw->l_tls_offset and return 0.
A portion of the surplus static TLS can be optionally used to optimize
dynamic TLS access (with TLSDESC or powerpc TLS optimizations).
If OPTIONAL is true then TLS is allocated for such optimization and
@@ -53,7 +53,7 @@ _dl_try_allocate_static_tls (struct link_map *map, bool optional)
{
/* If we've already used the variable with dynamic access, or if the
alignment requirements are too high, fail. */
- if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
+ if (map->l_rw->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
|| map->l_tls_align > GLRO (dl_tls_static_align))
{
fail:
@@ -81,7 +81,7 @@ _dl_try_allocate_static_tls (struct link_map *map, bool optional)
size_t offset = GL(dl_tls_static_used) + use;
- map->l_tls_offset = GL(dl_tls_static_used) = offset;
+ map->l_rw->l_tls_offset = GL(dl_tls_static_used) = offset;
#elif TLS_DTV_AT_TP
/* dl_tls_static_used includes the TCB at the beginning. */
size_t offset = (ALIGN_UP(GL(dl_tls_static_used)
@@ -100,7 +100,7 @@ _dl_try_allocate_static_tls (struct link_map *map, bool optional)
else if (optional)
GL(dl_tls_static_optional) -= use;
- map->l_tls_offset = offset;
+ map->l_rw->l_tls_offset = offset;
map->l_tls_firstbyte_offset = GL(dl_tls_static_used);
GL(dl_tls_static_used) = used;
#else
@@ -134,7 +134,7 @@ void
__attribute_noinline__
_dl_allocate_static_tls (struct link_map *map)
{
- if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
+ if (map->l_rw->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
|| _dl_try_allocate_static_tls (map, false))
{
_dl_signal_error (0, map->l_name, NULL, N_("\
@@ -150,9 +150,10 @@ void
_dl_nothread_init_static_tls (struct link_map *map)
{
#if TLS_TCB_AT_TP
- void *dest = (char *) THREAD_SELF - map->l_tls_offset;
+ void *dest = (char *) THREAD_SELF - map->l_rw->l_tls_offset;
#elif TLS_DTV_AT_TP
- void *dest = (char *) THREAD_SELF + map->l_tls_offset + TLS_PRE_TCB_SIZE;
+ void *dest = ((char *) THREAD_SELF + map->l_rw->l_tls_offset
+ + TLS_PRE_TCB_SIZE);
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
@@ -51,7 +51,7 @@ _dl_sort_maps_original (struct link_map **maps, unsigned int nmaps,
{
/* Do not handle ld.so in secondary namespaces and objects which
are not removed. */
- if (thisp != thisp->l_real || thisp->l_idx == -1)
+ if (thisp != thisp->l_real || thisp->l_rw->l_idx == -1)
goto skip;
}
@@ -87,10 +87,10 @@ _dl_sort_maps_original (struct link_map **maps, unsigned int nmaps,
goto next;
}
- if (__glibc_unlikely (for_fini && maps[k]->l_reldeps != NULL))
+ if (__glibc_unlikely (for_fini && maps[k]->l_rw->l_reldeps != NULL))
{
- unsigned int m = maps[k]->l_reldeps->act;
- struct link_map **relmaps = &maps[k]->l_reldeps->list[0];
+ unsigned int m = maps[k]->l_rw->l_reldeps->act;
+ struct link_map **relmaps = &maps[k]->l_rw->l_reldeps->list[0];
/* Look through the relocation dependencies of the object. */
while (m-- > 0)
@@ -137,32 +137,32 @@ dfs_traversal (struct link_map ***rpo, struct link_map *map,
{
/* _dl_map_object_deps ignores l_faked objects when calculating the
number of maps before calling _dl_sort_maps, ignore them as well. */
- if (map->l_visited || map->l_faked)
+ if (map->l_rw->l_visited || map->l_faked)
return;
- map->l_visited = 1;
+ map->l_rw->l_visited = 1;
if (map->l_initfini)
{
for (int i = 0; map->l_initfini[i] != NULL; i++)
{
struct link_map *dep = map->l_initfini[i];
- if (dep->l_visited == 0
+ if (dep->l_rw->l_visited == 0
&& dep->l_main_map == 0)
dfs_traversal (rpo, dep, do_reldeps);
}
}
- if (__glibc_unlikely (do_reldeps != NULL && map->l_reldeps != NULL))
+ if (__glibc_unlikely (do_reldeps != NULL && map->l_rw->l_reldeps != NULL))
{
/* Indicate that we encountered relocation dependencies during
traversal. */
*do_reldeps = true;
- for (int m = map->l_reldeps->act - 1; m >= 0; m--)
+ for (int m = map->l_rw->l_reldeps->act - 1; m >= 0; m--)
{
- struct link_map *dep = map->l_reldeps->list[m];
- if (dep->l_visited == 0
+ struct link_map *dep = map->l_rw->l_reldeps->list[m];
+ if (dep->l_rw->l_visited == 0
&& dep->l_main_map == 0)
dfs_traversal (rpo, dep, do_reldeps);
}
@@ -181,7 +181,7 @@ _dl_sort_maps_dfs (struct link_map **maps, unsigned int nmaps,
{
struct link_map *first_map = maps[0];
for (int i = nmaps - 1; i >= 0; i--)
- maps[i]->l_visited = 0;
+ maps[i]->l_rw->l_visited = 0;
/* We apply DFS traversal for each of maps[i] until the whole total order
is found and we're at the start of the Reverse-Postorder (RPO) sequence,
@@ -244,7 +244,7 @@ _dl_sort_maps_dfs (struct link_map **maps, unsigned int nmaps,
if (do_reldeps)
{
for (int i = nmaps - 1; i >= 0; i--)
- rpo[i]->l_visited = 0;
+ rpo[i]->l_rw->l_visited = 0;
struct link_map **maps_head = &maps[nmaps];
for (int i = nmaps - 1; i >= 0; i--)
@@ -29,8 +29,8 @@
can't be done, we fall back to the error that DF_STATIC_TLS is
intended to produce. */
#define HAVE_STATIC_TLS(map, sym_map) \
- (__builtin_expect ((sym_map)->l_tls_offset != NO_TLS_OFFSET \
- && ((sym_map)->l_tls_offset \
+ (__builtin_expect ((sym_map)->l_rw->l_tls_offset != NO_TLS_OFFSET \
+ && ((sym_map)->l_rw->l_tls_offset \
!= FORCED_DYNAMIC_TLS_OFFSET), 1))
#define CHECK_STATIC_TLS(map, sym_map) \
@@ -40,9 +40,9 @@
} while (0)
#define TRY_STATIC_TLS(map, sym_map) \
- (__builtin_expect ((sym_map)->l_tls_offset \
+ (__builtin_expect ((sym_map)->l_rw->l_tls_offset \
!= FORCED_DYNAMIC_TLS_OFFSET, 1) \
- && (__builtin_expect ((sym_map)->l_tls_offset != NO_TLS_OFFSET, 1) \
+ && (__builtin_expect ((sym_map)->l_rw->l_tls_offset != NO_TLS_OFFSET, 1)\
|| _dl_try_allocate_static_tls (sym_map, true) == 0))
int _dl_try_allocate_static_tls (struct link_map *map, bool optional)
@@ -82,6 +82,7 @@ int _dl_bind_not;
static struct link_map _dl_main_map =
{
.l_name = (char *) "",
+ .l_rw = &(struct link_map_rw) { .l_tls_offset = NO_TLS_OFFSET, },
.l_real = &_dl_main_map,
.l_ns = LM_ID_BASE,
.l_libname = &(struct libname_list) { .name = "", .dont_free = 1 },
@@ -98,7 +99,6 @@ static struct link_map _dl_main_map =
.l_scope = _dl_main_map.l_scope_mem,
.l_local_scope = { &_dl_main_map.l_searchlist },
.l_used = 1,
- .l_tls_offset = NO_TLS_OFFSET,
.l_serial = 1,
};
@@ -299,7 +299,7 @@ _dl_determine_tlsoffset (void)
/* XXX For some architectures we perhaps should store the
negative offset. */
- l->l_tls_offset = off;
+ l->l_rw->l_tls_offset = off;
continue;
}
}
@@ -316,7 +316,7 @@ _dl_determine_tlsoffset (void)
/* XXX For some architectures we perhaps should store the
negative offset. */
- l->l_tls_offset = off;
+ l->l_rw->l_tls_offset = off;
}
/* Insert the extra TLS block after the last TLS block. */
@@ -378,9 +378,9 @@ _dl_determine_tlsoffset (void)
off += l->l_tls_align;
if (off + l->l_tls_blocksize - firstbyte <= freetop)
{
- l->l_tls_offset = off - firstbyte;
+ l->l_rw->l_tls_offset = off - firstbyte;
freebottom = (off + l->l_tls_blocksize
- - firstbyte);
+ - firstbyte);
continue;
}
}
@@ -389,7 +389,7 @@ _dl_determine_tlsoffset (void)
if (off - offset < firstbyte)
off += l->l_tls_align;
- l->l_tls_offset = off - firstbyte;
+ l->l_rw->l_tls_offset = off - firstbyte;
if (off - firstbyte - offset > freetop - freebottom)
{
freebottom = offset;
@@ -645,17 +645,17 @@ _dl_allocate_tls_init (void *result, bool main_thread)
dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
dtv[map->l_tls_modid].pointer.to_free = NULL;
- if (map->l_tls_offset == NO_TLS_OFFSET
- || map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET)
+ if (map->l_rw->l_tls_offset == NO_TLS_OFFSET
+ || map->l_rw->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET)
continue;
assert (map->l_tls_modid == total + cnt);
assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
#if TLS_TCB_AT_TP
- assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
- dest = (char *) result - map->l_tls_offset;
+ assert ((size_t) map->l_rw->l_tls_offset >= map->l_tls_blocksize);
+ dest = (char *) result - map->l_rw->l_tls_offset;
#elif TLS_DTV_AT_TP
- dest = (char *) result + map->l_tls_offset;
+ dest = (char *) result + map->l_rw->l_tls_offset;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
@@ -959,22 +959,23 @@ tls_get_addr_tail (tls_index *ti, dtv_t *dtv, struct link_map *the_map)
variable into static storage, we'll wait until the address in the
static TLS block is set up, and use that. If we're undecided
yet, make sure we make the decision holding the lock as well. */
- if (__glibc_unlikely (the_map->l_tls_offset
+ if (__glibc_unlikely (the_map->l_rw->l_tls_offset
!= FORCED_DYNAMIC_TLS_OFFSET))
{
__rtld_lock_lock_recursive (GL(dl_load_tls_lock));
- if (__glibc_likely (the_map->l_tls_offset == NO_TLS_OFFSET))
+ if (__glibc_likely (the_map->l_rw->l_tls_offset == NO_TLS_OFFSET))
{
- the_map->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
+ the_map->l_rw->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
__rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
}
- else if (__glibc_likely (the_map->l_tls_offset
+ else if (__glibc_likely (the_map->l_rw->l_tls_offset
!= FORCED_DYNAMIC_TLS_OFFSET))
{
#if TLS_TCB_AT_TP
- void *p = (char *) THREAD_SELF - the_map->l_tls_offset;
+ void *p = (char *) THREAD_SELF - the_map->l_rw->l_tls_offset;
#elif TLS_DTV_AT_TP
- void *p = (char *) THREAD_SELF + the_map->l_tls_offset + TLS_PRE_TCB_SIZE;
+ void *p = ((char *) THREAD_SELF + the_map->l_rw->l_tls_offset
+ + TLS_PRE_TCB_SIZE);
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
@@ -1223,9 +1224,9 @@ static inline void __attribute__((always_inline))
init_one_static_tls (struct pthread *curp, struct link_map *map)
{
# if TLS_TCB_AT_TP
- void *dest = (char *) curp - map->l_tls_offset;
+ void *dest = (char *) curp - map->l_rw->l_tls_offset;
# elif TLS_DTV_AT_TP
- void *dest = (char *) curp + map->l_tls_offset + TLS_PRE_TCB_SIZE;
+ void *dest = (char *) curp + map->l_rw->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
@@ -163,7 +163,7 @@ elf_get_dynamic_info (struct link_map *l, bool bootstrap,
{
l->l_flags_1 = info[VERSYMIDX (DT_FLAGS_1)]->d_un.d_val;
if (l->l_flags_1 & DF_1_NODELETE)
- l->l_nodelete_pending = true;
+ l->l_rw->l_nodelete_pending = true;
/* Only DT_1_SUPPORTED_MASK bits are supported, and we would like
to assert this, but we can't. Users have been setting
@@ -78,7 +78,7 @@ static const struct
for (map = MAPS; map != NULL; map = map->l_next) \
if (map->l_type == lt_loaded) \
printf ("name = \"%s\", direct_opencount = %d\n", \
- map->l_name, (int) map->l_direct_opencount); \
+ map->l_name, (int) map->l_rw->l_direct_opencount); \
fflush (stdout); \
} \
while (0)
@@ -190,7 +190,7 @@ main (int argc, char *argv[])
if (map->l_type == lt_loaded)
{
printf ("name = \"%s\", direct_opencount = %d\n",
- map->l_name, (int) map->l_direct_opencount);
+ map->l_name, (int) map->l_rw->l_direct_opencount);
result = 1;
}
@@ -29,7 +29,8 @@ check_loaded_objects (const char **loaded)
for (lm = MAPS; lm; lm = lm->l_next)
{
if (lm->l_name && lm->l_name[0])
- printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
+ printf(" %s, count = %d\n", lm->l_name,
+ (int) lm->l_rw->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_name)
{
int match = 0;
@@ -29,7 +29,8 @@ check_loaded_objects (const char **loaded)
for (lm = MAPS; lm; lm = lm->l_next)
{
if (lm->l_name && lm->l_name[0])
- printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
+ printf(" %s, count = %d\n", lm->l_name,
+ (int) lm->l_rw->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_name)
{
int match = 0;
@@ -29,7 +29,8 @@ check_loaded_objects (const char **loaded)
for (lm = MAPS; lm; lm = lm->l_next)
{
if (lm->l_name && lm->l_name[0])
- printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
+ printf(" %s, count = %d\n", lm->l_name,
+ (int) lm->l_rw->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_name)
{
int match = 0;
@@ -29,7 +29,8 @@ check_loaded_objects (const char **loaded)
for (lm = MAPS; lm; lm = lm->l_next)
{
if (lm->l_name && lm->l_name[0])
- printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
+ printf(" %s, count = %d\n", lm->l_name,
+ (int) lm->l_rw->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_name)
{
int match = 0;
@@ -460,6 +460,17 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
interfere with __rtld_static_init. */
GLRO (dl_find_object) = &_dl_find_object;
+ /* Pre-allocated read-write status of the ld.so link map. */
+ static struct
+ {
+ struct link_map_rw l;
+ struct auditstate _dl_rtld_auditstate[DL_NNS];
+ } rtld_map_rw;
+ _dl_rtld_map.l_rw = &rtld_map_rw.l;
+#if NO_TLS_OFFSET != 0
+ _dl_rtld_map.l_rw->l_tls_offset = NO_TLS_OFFSET;
+#endif
+
/* If it hasn't happen yet record the startup time. */
rtld_timer_start (&start_time);
#if !defined DONT_USE_BOOTSTRAP_MAP
@@ -482,7 +493,7 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
/* Copy the TLS related data if necessary. */
#ifndef DONT_USE_BOOTSTRAP_MAP
# if NO_TLS_OFFSET != 0
- _dl_rtld_map.l_tls_offset = NO_TLS_OFFSET;
+ _dl_rtld_map.l_rw->l_tls_offset = NO_TLS_OFFSET;
# endif
#endif
@@ -549,10 +560,6 @@ _dl_start (void *arg)
bootstrap_map.l_ld_readonly = DL_RO_DYN_SECTION;
elf_get_dynamic_info (&bootstrap_map, true, false);
-#if NO_TLS_OFFSET != 0
- bootstrap_map.l_tls_offset = NO_TLS_OFFSET;
-#endif
-
#ifdef ELF_MACHINE_BEFORE_RTLD_RELOC
ELF_MACHINE_BEFORE_RTLD_RELOC (&bootstrap_map, bootstrap_map.l_info);
#endif
@@ -1100,7 +1107,7 @@ rtld_setup_main_map (struct link_map *main_map)
/* Perhaps the executable has no PT_LOAD header entries at all. */
main_map->l_map_start = ~0;
/* And it was opened directly. */
- ++main_map->l_direct_opencount;
+ ++main_map->l_rw->l_direct_opencount;
main_map->l_contiguous = 1;
/* A PT_LOAD segment at an unexpected address will clear the
@@ -34,7 +34,8 @@ do_test (void)
printf ("thread variable address: %p\n", &thread_var);
printf ("thread pointer address: %p\n", __thread_pointer ());
printf ("pthread_self address: %p\n", (void *) pthread_self ());
- ptrdiff_t block_offset = ((struct link_map *) _r_debug.r_map)->l_tls_offset;
+ ptrdiff_t block_offset
+ = ((struct link_map *) _r_debug.r_map)->l_rw->l_tls_offset;
printf ("main program TLS block offset: %td\n", block_offset);
if ((uintptr_t) &thread_var < (uintptr_t) THREAD_SELF)
@@ -15,7 +15,7 @@
for (map = MAPS; map != NULL; map = map->l_next) \
if (map->l_type == lt_loaded) \
printf ("name = \"%s\", direct_opencount = %d\n", \
- map->l_name, (int) map->l_direct_opencount); \
+ map->l_name, (int) map->l_rw->l_direct_opencount); \
fflush (stdout)
typedef struct
@@ -12,7 +12,7 @@
for (map = MAPS; map != NULL; map = map->l_next) \
if (map->l_type == lt_loaded) \
printf ("name = \"%s\", direct_opencount = %d\n", \
- map->l_name, (int) map->l_direct_opencount); \
+ map->l_name, (int) map->l_rw->l_direct_opencount); \
fflush (stdout)
int
@@ -217,9 +217,10 @@ __pthread_init_static_tls (struct link_map *map)
continue;
# if TLS_TCB_AT_TP
- void *dest = (char *) t->tcb - map->l_tls_offset;
+ void *dest = (char *) t->tcb - map->l_rw->l_tls_offset;
# elif TLS_DTV_AT_TP
- void *dest = (char *) t->tcb + map->l_tls_offset + TLS_PRE_TCB_SIZE;
+ void *dest = ((char *) t->tcb + map->l_rw->l_tls_offset
+ + TLS_PRE_TCB_SIZE);
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
@@ -83,6 +83,71 @@ struct r_search_path_struct
extern struct r_search_path_struct __rtld_search_dirs attribute_hidden;
extern struct r_search_path_struct __rtld_env_path_list attribute_hidden;
+
+/* Link map attributes that are always readable and writable. */
+struct link_map_rw
+{
+ /* List of the dependencies introduced through symbol binding. */
+ struct link_map_reldeps
+ {
+ unsigned int act;
+ struct link_map *list[];
+ } *l_reldeps;
+ unsigned int l_reldepsmax;
+
+ /* Reference count for dlopen/dlclose. */
+ unsigned int l_direct_opencount;
+
+ /* For objects present at startup time: offset in the static TLS
+ block. For loaded objects, it can be NO_TLS_OFFSET (not yet
+ initialized), FORCED_DYNAMIC_TLS_OFFSET (if fully dynamic TLS is
+ used), or an actual TLS offset (if the static TLS allocation has
+ been re-used to satisfy dynamic TLS needs).
+
+ This field is written outside the general loader lock, so it has
+ to reside in the read-write portion of the link map. */
+#ifndef NO_TLS_OFFSET
+# define NO_TLS_OFFSET 0
+#endif
+#ifndef FORCED_DYNAMIC_TLS_OFFSET
+# if NO_TLS_OFFSET == 0
+# define FORCED_DYNAMIC_TLS_OFFSET -1
+# elif NO_TLS_OFFSET == -1
+# define FORCED_DYNAMIC_TLS_OFFSET -2
+# else
+# error "FORCED_DYNAMIC_TLS_OFFSET is not defined"
+# endif
+#endif
+ ptrdiff_t l_tls_offset;
+
+ /* Number of thread_local objects constructed by this DSO. This is
+ atomically accessed and modified and is not always protected by the load
+ lock. See also: CONCURRENCY NOTES in cxa_thread_atexit_impl.c. */
+ size_t l_tls_dtor_count;
+
+ /* True if ELF constructors have been called. */
+ bool l_init_called;
+
+ /* NODELETE status of the map. Only valid for maps of type
+ lt_loaded. Lazy binding sets l_nodelete_active directly,
+ potentially from signal handlers. Initial loading of an
+ DF_1_NODELETE object set l_nodelete_pending. Relocation may
+ set l_nodelete_pending as well. l_nodelete_pending maps are
+ promoted to l_nodelete_active status in the final stages of
+ dlopen, prior to calling ELF constructors. dlclose only
+ refuses to unload l_nodelete_active maps, the pending status is
+ ignored. */
+ bool l_nodelete_active;
+ bool l_nodelete_pending;
+
+ /* Used for dependency sorting in dlclose/_dl_fini. These need to
+ be writable all the time because there is no way to report an
+ error in _dl_fini. These flags can be moved into struct
+ link_map_private once _dl_fini no longer re-sorts link maps. */
+ bool l_visited;
+ int l_idx;
+};
+
/* Structure describing a loaded shared object. The `l_next' and `l_prev'
members form a chain of all the shared objects loaded at startup.
@@ -111,6 +176,9 @@ struct link_map
than one namespace. */
struct link_map *l_real;
+ /* Run-time writable fields. */
+ struct link_map_rw *l_rw;
+
/* Number of the namespace this link map belongs to. */
Lmid_t l_ns;
@@ -170,7 +238,6 @@ struct link_map
const Elf_Symndx *l_buckets;
};
- unsigned int l_direct_opencount; /* Reference count for dlopen/dlclose. */
enum /* Where this object came from. */
{
lt_executable, /* The main executable program. */
@@ -180,12 +247,9 @@ struct link_map
unsigned int l_dt_relr_ref:1; /* Nonzero if GLIBC_ABI_DT_RELR is
referenced. */
unsigned int l_relocated:1; /* Nonzero if object's relocations done. */
- unsigned int l_init_called:1; /* Nonzero if DT_INIT function called. */
unsigned int l_global:1; /* Nonzero if object in _dl_global_scope. */
unsigned int l_reserved:2; /* Reserved for internal use. */
unsigned int l_main_map:1; /* Nonzero for the map of the main program. */
- unsigned int l_visited:1; /* Used internally for map dependency
- graph traversal. */
unsigned int l_map_used:1; /* These two bits are used during traversal */
unsigned int l_map_done:1; /* of maps in _dl_close_worker. */
unsigned int l_phdr_allocated:1; /* Nonzero if the data structure pointed
@@ -214,18 +278,6 @@ struct link_map
lt_library map. */
unsigned int l_tls_in_slotinfo:1; /* TLS slotinfo updated in dlopen. */
- /* NODELETE status of the map. Only valid for maps of type
- lt_loaded. Lazy binding sets l_nodelete_active directly,
- potentially from signal handlers. Initial loading of an
- DF_1_NODELETE object set l_nodelete_pending. Relocation may
- set l_nodelete_pending as well. l_nodelete_pending maps are
- promoted to l_nodelete_active status in the final stages of
- dlopen, prior to calling ELF constructors. dlclose only
- refuses to unload l_nodelete_active maps, the pending status is
- ignored. */
- bool l_nodelete_active;
- bool l_nodelete_pending;
-
#include <link_map.h>
/* Collected information about own RPATH directories. */
@@ -277,14 +329,6 @@ struct link_map
/* List of object in order of the init and fini calls. */
struct link_map **l_initfini;
- /* List of the dependencies introduced through symbol binding. */
- struct link_map_reldeps
- {
- unsigned int act;
- struct link_map *list[];
- } *l_reldeps;
- unsigned int l_reldepsmax;
-
/* Nonzero if the DSO is used. */
unsigned int l_used;
@@ -293,9 +337,6 @@ struct link_map
ElfW(Word) l_flags_1;
ElfW(Word) l_flags;
- /* Temporarily used in `dl_close'. */
- int l_idx;
-
struct link_map_machine l_mach;
struct
@@ -318,28 +359,9 @@ struct link_map
size_t l_tls_align;
/* Offset of first byte module alignment. */
size_t l_tls_firstbyte_offset;
-#ifndef NO_TLS_OFFSET
-# define NO_TLS_OFFSET 0
-#endif
-#ifndef FORCED_DYNAMIC_TLS_OFFSET
-# if NO_TLS_OFFSET == 0
-# define FORCED_DYNAMIC_TLS_OFFSET -1
-# elif NO_TLS_OFFSET == -1
-# define FORCED_DYNAMIC_TLS_OFFSET -2
-# else
-# error "FORCED_DYNAMIC_TLS_OFFSET is not defined"
-# endif
-#endif
- /* For objects present at startup time: offset in the static TLS block. */
- ptrdiff_t l_tls_offset;
/* Index of the module in the dtv array. */
size_t l_tls_modid;
- /* Number of thread_local objects constructed by this DSO. This is
- atomically accessed and modified and is not always protected by the load
- lock. See also: CONCURRENCY NOTES in cxa_thread_atexit_impl.c. */
- size_t l_tls_dtor_count;
-
/* Information used to change permission after the relocations are
done. */
ElfW(Addr) l_relro_addr;
@@ -350,15 +372,16 @@ struct link_map
#include <dl-relocate-ld.h>
-/* Information used by audit modules. For most link maps, this data
- immediate follows the link map in memory. For the dynamic linker,
- it is allocated separately. See link_map_audit_state in
- <ldsodefs.h>. */
+/* Information used by audit modules. An array of size GLRO (naudit)
+ elements follows the l_rw link map data in memory (in some cases
+ conservatively extended to DL_NNS). */
struct auditstate
{
uintptr_t cookie;
unsigned int bindflags;
};
+_Static_assert (__alignof (struct auditstate) <= __alignof (struct link_map_rw),
+ "auditstate alignment compatible with link_map_rw alignment");
/* This is the hidden instance of struct r_debug_extended used by the
@@ -404,8 +404,9 @@ libc {
_thread_db_dtv_slotinfo_map;
_thread_db_dtv_t_counter;
_thread_db_dtv_t_pointer_val;
+ _thread_db_link_map_l_rw;
_thread_db_link_map_l_tls_modid;
- _thread_db_link_map_l_tls_offset;
+ _thread_db_link_map_rw_l_tls_offset;
_thread_db_list_t_next;
_thread_db_list_t_prev;
_thread_db_pthread_cancelhandling;
@@ -38,6 +38,7 @@ typedef struct
} dtv;
typedef struct link_map link_map;
+typedef struct link_map_rw link_map_rw;
typedef struct rtld_global rtld_global;
typedef struct dtv_slotinfo_list dtv_slotinfo_list;
typedef struct dtv_slotinfo dtv_slotinfo;
@@ -93,7 +93,8 @@ DB_STRUCT (pthread_key_data_level2)
DB_STRUCT_ARRAY_FIELD (pthread_key_data_level2, data)
DB_STRUCT_FIELD (link_map, l_tls_modid)
-DB_STRUCT_FIELD (link_map, l_tls_offset)
+DB_STRUCT_FIELD (link_map, l_rw)
+DB_STRUCT_FIELD (link_map_rw, l_tls_offset)
DB_STRUCT_ARRAY_FIELD (dtv, dtv)
#define pointer_val pointer.val /* Field of anonymous struct in dtv_t. */
@@ -191,9 +191,15 @@ td_thr_tlsbase (const td_thrhandle_t *th,
/* Is the DTV current enough? */
if (dtvgen < modgen)
{
- try_static_tls:
- /* If the module uses Static TLS, we're still good. */
- err = DB_GET_FIELD (temp, th->th_ta_p, map, link_map, l_tls_offset, 0);
+ try_static_tls:;
+ /* If the module uses Static TLS, we're still good. Follow the
+ l_rw pointer to l_tls_offset. */
+ psaddr_t l_rw;
+ err = DB_GET_FIELD (l_rw, th->th_ta_p, map, link_map, l_rw, 0);
+ if (err != TD_OK)
+ return err;
+ err = DB_GET_FIELD (temp, th->th_ta_p, l_rw, link_map_rw,
+ l_tls_offset, 0);
if (err != TD_OK)
return err;
ptrdiff_t tlsoff = (uintptr_t)temp;
@@ -133,7 +133,7 @@ __cxa_thread_atexit_impl (dtor_func func, void *obj, void *dso_symbol)
_dl_close_worker is protected by the dl_load_lock. The execution in
__call_tls_dtors does not really depend on this value beyond the fact that
it should be atomic, so Relaxed MO should be sufficient. */
- atomic_fetch_add_relaxed (&lm_cache->l_tls_dtor_count, 1);
+ atomic_fetch_add_relaxed (&lm_cache->l_rw->l_tls_dtor_count, 1);
__rtld_lock_unlock_recursive (GL(dl_load_lock));
new->map = lm_cache;
@@ -159,7 +159,7 @@ __call_tls_dtors (void)
l_tls_dtor_count decrement. That way, we protect this access from a
potential DSO unload in _dl_close_worker, which happens when
l_tls_dtor_count is 0. See CONCURRENCY NOTES for more detail. */
- atomic_fetch_add_release (&cur->map->l_tls_dtor_count, -1);
+ atomic_fetch_add_release (&cur->map->l_rw->l_tls_dtor_count, -1);
free (cur);
}
}
@@ -249,7 +249,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
else
# endif
{
- td->arg = (void*)(sym->st_value + sym_map->l_tls_offset
+ td->arg = (void*)(sym->st_value
+ + sym_map->l_rw->l_tls_offset
+ reloc->r_addend);
td->entry = _dl_tlsdesc_return;
}
@@ -274,7 +275,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
{
CHECK_STATIC_TLS (map, sym_map);
*reloc_addr =
- sym->st_value + reloc->r_addend + sym_map->l_tls_offset;
+ sym->st_value + reloc->r_addend + sym_map->l_rw->l_tls_offset;
}
break;
@@ -401,12 +401,12 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
else if (r_type == R_ALPHA_TPREL64)
{
# ifdef RTLD_BOOTSTRAP
- *reloc_addr = sym_raw_value + map->l_tls_offset;
+ *reloc_addr = sym_raw_value + map->l_rw->l_tls_offset;
# else
if (sym_map)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym_raw_value + sym_map->l_tls_offset;
+ *reloc_addr = sym_raw_value + sym_map->l_rw->l_tls_offset;
}
# endif
}
@@ -284,7 +284,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym_map->l_tls_offset + sym->st_value + reloc->r_addend;
+ *reloc_addr = (sym_map->l_rw->l_tls_offset + sym->st_value
+ + reloc->r_addend);
}
break;
@@ -394,7 +394,7 @@ elf_machine_rel (struct link_map *map, struct r_scope_elem *scope[],
# endif
# endif
{
- td->argument.value = value + sym_map->l_tls_offset;
+ td->argument.value = value + sym_map->l_rw->l_tls_offset;
td->entry = _dl_tlsdesc_return;
}
}
@@ -424,7 +424,7 @@ elf_machine_rel (struct link_map *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr += sym->st_value + sym_map->l_tls_offset;
+ *reloc_addr += sym->st_value + sym_map->l_rw->l_tls_offset;
}
break;
case R_ARM_IRELATIVE:
@@ -302,7 +302,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = (sym->st_value + sym_map->l_tls_offset
+ *reloc_addr = (sym->st_value + sym_map->l_rw->l_tls_offset
+ reloc->r_addend);
}
break;
@@ -1342,15 +1342,9 @@ is_rtld_link_map (const struct link_map *l)
static inline struct auditstate *
link_map_audit_state (struct link_map *l, size_t index)
{
- if (is_rtld_link_map (l))
- /* The auditstate array is stored separately. */
- return _dl_rtld_auditstate + index;
- else
- {
- /* The auditstate array follows the link map in memory. */
- struct auditstate *base = (struct auditstate *) (l + 1);
- return &base[index];
- }
+ /* The auditstate array follows the read-write link map part in memory. */
+ struct auditstate *base = (struct auditstate *) (l->l_rw + 1);
+ return &base[index];
}
/* Call the la_objsearch from the audit modules from the link map L. If
@@ -715,7 +715,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- value = sym_map->l_tls_offset + sym->st_value + reloc->r_addend;
+ value = (sym_map->l_rw->l_tls_offset + sym->st_value
+ + reloc->r_addend);
}
break;
#endif /* use TLS */
@@ -353,7 +353,8 @@ and creates an unsatisfiable circular dependency.\n",
# endif
# endif
{
- td->arg = (void*)(sym->st_value - sym_map->l_tls_offset
+ td->arg = (void*)(sym->st_value
+ - sym_map->l_rw->l_tls_offset
+ (ElfW(Word))td->arg);
td->entry = _dl_tlsdesc_return;
}
@@ -363,7 +364,7 @@ and creates an unsatisfiable circular dependency.\n",
case R_386_TLS_TPOFF32:
/* The offset is positive, backward from the thread pointer. */
# ifdef RTLD_BOOTSTRAP
- *reloc_addr += map->l_tls_offset - sym->st_value;
+ *reloc_addr += map->l_rw->l_tls_offset - sym->st_value;
# else
/* We know the offset of object the symbol is contained in.
It is a positive value which will be subtracted from the
@@ -372,14 +373,14 @@ and creates an unsatisfiable circular dependency.\n",
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr += sym_map->l_tls_offset - sym->st_value;
+ *reloc_addr += sym_map->l_rw->l_tls_offset - sym->st_value;
}
# endif
break;
case R_386_TLS_TPOFF:
/* The offset is negative, forward from the thread pointer. */
# ifdef RTLD_BOOTSTRAP
- *reloc_addr += sym->st_value - map->l_tls_offset;
+ *reloc_addr += sym->st_value - map->l_rw->l_tls_offset;
# else
/* We know the offset of object the symbol is contained in.
It is a negative value which will be added to the
@@ -387,7 +388,7 @@ and creates an unsatisfiable circular dependency.\n",
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr += sym->st_value - sym_map->l_tls_offset;
+ *reloc_addr += sym->st_value - sym_map->l_rw->l_tls_offset;
}
# endif
break;
@@ -37,7 +37,7 @@ extern void *__tls_get_addr (tls_index *ti);
/* Compute the value for a GOTTPREL reloc. */
#define TLS_TPREL_VALUE(sym_map, sym) \
- ((sym_map)->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
+ ((sym_map)->l_rw->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
/* Compute the value for a DTPREL reloc. */
#define TLS_DTPREL_VALUE(sym) ((sym)->st_value - TLS_DTV_OFFSET)
@@ -35,7 +35,7 @@ typedef struct
/* Compute the value for a TPREL reloc. */
#define TLS_TPREL_VALUE(sym_map, sym, reloc) \
- ((sym_map)->l_tls_offset + (sym)->st_value + (reloc)->r_addend \
+ ((sym_map)->l_rw->l_tls_offset + (sym)->st_value + (reloc)->r_addend \
- TLS_TP_OFFSET)
/* Compute the value for a DTPREL reloc. */
@@ -262,7 +262,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym->st_value + sym_map->l_tls_offset + reloc->r_addend;
+ *reloc_addr = (sym->st_value + sym_map->l_rw->l_tls_offset
+ + reloc->r_addend);
}
}
#endif
@@ -35,7 +35,7 @@ typedef struct
/* Compute the value for a GOTTPREL reloc. */
#define TLS_TPREL_VALUE(sym_map, sym) \
- ((sym_map)->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
+ ((sym_map)->l_rw->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
/* Compute the value for a DTPREL reloc. */
#define TLS_DTPREL_VALUE(sym) \
@@ -250,13 +250,13 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
case R_OR1K_TLS_TPOFF:
# ifdef RTLD_BOOTSTRAP
*reloc_addr = sym->st_value + reloc->r_addend +
- map->l_tls_offset - TLS_TCB_SIZE;
+ map->l_rw->l_tls_offset - TLS_TCB_SIZE;
# else
if (sym_map != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
*reloc_addr = sym->st_value + reloc->r_addend +
- sym_map->l_tls_offset - TLS_TCB_SIZE;
+ sym_map->l_rw->l_tls_offset - TLS_TCB_SIZE;
}
# endif
break;
@@ -35,7 +35,7 @@ typedef struct
/* Compute the value for a @tprel reloc. */
#define TLS_TPREL_VALUE(sym_map, sym, reloc) \
- ((sym_map)->l_tls_offset + (sym)->st_value + (reloc)->r_addend \
+ ((sym_map)->l_rw->l_tls_offset + (sym)->st_value + (reloc)->r_addend \
- TLS_TP_OFFSET)
/* Compute the value for a @dtprel reloc. */
@@ -354,7 +354,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
if (!NOT_BOOTSTRAP)
{
reloc_addr[0] = 0;
- reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
+ reloc_addr[1] = (sym_map->l_rw->l_tls_offset - TLS_TP_OFFSET
+ TLS_DTV_OFFSET);
break;
}
@@ -368,7 +368,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
{
reloc_addr[0] = 0;
/* Set up for local dynamic. */
- reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
+ reloc_addr[1] = (sym_map->l_rw->l_tls_offset - TLS_TP_OFFSET
+ TLS_DTV_OFFSET);
break;
}
@@ -748,7 +748,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
{
#ifdef RTLD_BOOTSTRAP
reloc_addr[0] = 0;
- reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
+ reloc_addr[1] = (sym_map->l_rw->l_tls_offset - TLS_TP_OFFSET
+ TLS_DTV_OFFSET);
return;
#else
@@ -762,7 +762,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
{
reloc_addr[0] = 0;
/* Set up for local dynamic. */
- reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
+ reloc_addr[1] = (sym_map->l_rw->l_tls_offset - TLS_TP_OFFSET
+ TLS_DTV_OFFSET);
return;
}
@@ -35,7 +35,7 @@ typedef struct
/* Compute the value for a GOTTPREL reloc. */
#define TLS_TPREL_VALUE(sym_map, sym) \
- ((sym_map)->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
+ ((sym_map)->l_rw->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
/* Compute the value for a DTPREL reloc. */
#define TLS_DTPREL_VALUE(sym) \
@@ -339,7 +339,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
case R_390_TLS_TPOFF:
/* The offset is negative, forward from the thread pointer. */
#ifdef RTLD_BOOTSTRAP
- *reloc_addr = sym->st_value + reloc->r_addend - map->l_tls_offset;
+ *reloc_addr = (sym->st_value + reloc->r_addend
+ - map->l_rw->l_tls_offset);
#else
/* We know the offset of the object the symbol is contained in.
It is a negative value which will be added to the
@@ -348,7 +349,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
{
CHECK_STATIC_TLS (map, sym_map);
*reloc_addr = (sym->st_value + reloc->r_addend
- - sym_map->l_tls_offset);
+ - sym_map->l_rw->l_tls_offset);
}
#endif
break;
@@ -321,7 +321,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
case R_390_TLS_TPOFF:
/* The offset is negative, forward from the thread pointer. */
#ifdef RTLD_BOOTSTRAP
- *reloc_addr = sym->st_value + reloc->r_addend - map->l_tls_offset;
+ *reloc_addr = (sym->st_value + reloc->r_addend
+ - map->l_rw->l_tls_offset);
#else
/* We know the offset of the object the symbol is contained in.
It is a negative value which will be added to the
@@ -330,7 +331,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
{
CHECK_STATIC_TLS (map, sym_map);
*reloc_addr = (sym->st_value + reloc->r_addend
- - sym_map->l_tls_offset);
+ - sym_map->l_rw->l_tls_offset);
}
#endif
break;
@@ -363,7 +363,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
case R_SH_TLS_TPOFF32:
/* The offset is positive, afterward from the thread pointer. */
#ifdef RTLD_BOOTSTRAP
- *reloc_addr = map->l_tls_offset + sym->st_value + reloc->r_addend;
+ *reloc_addr = (map->l_rw->l_tls_offset + sym->st_value
+ + reloc->r_addend);
#else
/* We know the offset of object the symbol is contained in.
It is a positive value which will be added to the thread
@@ -372,8 +373,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym_map->l_tls_offset + sym->st_value
- + reloc->r_addend;
+ *reloc_addr = (sym_map->l_rw->l_tls_offset + sym->st_value
+ + reloc->r_addend);
}
#endif
break;
@@ -371,7 +371,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym->st_value - sym_map->l_tls_offset
+ *reloc_addr = sym->st_value - sym_map->l_rw->l_tls_offset
+ reloc->r_addend;
}
break;
@@ -381,7 +381,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- value = sym->st_value - sym_map->l_tls_offset
+ value = sym->st_value - sym_map->l_rw->l_tls_offset
+ reloc->r_addend;
if (r_type == R_SPARC_TLS_LE_HIX22)
*reloc_addr = (*reloc_addr & 0xffc00000) | ((~value) >> 10);
@@ -387,7 +387,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym->st_value - sym_map->l_tls_offset
+ *reloc_addr = sym->st_value - sym_map->l_rw->l_tls_offset
+ reloc->r_addend;
}
break;
@@ -397,7 +397,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- value = sym->st_value - sym_map->l_tls_offset
+ value = sym->st_value - sym_map->l_rw->l_tls_offset
+ reloc->r_addend;
if (r_type == R_SPARC_TLS_LE_HIX22)
*(unsigned int *)reloc_addr =
@@ -40,7 +40,7 @@ dl_isa_level_check (struct link_map *m, const char *program)
l = m->l_initfini[i];
/* Skip ISA level check if functions have been executed. */
- if (l->l_init_called)
+ if (l->l_rw->l_init_called)
continue;
#ifdef SHARED
@@ -383,7 +383,8 @@ and creates an unsatisfiable circular dependency.\n",
else
# endif
{
- td->arg = (void*)(sym->st_value - sym_map->l_tls_offset
+ td->arg = (void*)(sym->st_value
+ - sym_map->l_rw->l_tls_offset
+ reloc->r_addend);
td->entry = _dl_tlsdesc_return;
}
@@ -399,7 +400,7 @@ and creates an unsatisfiable circular dependency.\n",
It is a negative value which will be added to the
thread pointer. */
value = (sym->st_value + reloc->r_addend
- - sym_map->l_tls_offset);
+ - sym_map->l_rw->l_tls_offset);
# ifdef __ILP32__
/* The symbol and addend values are 32 bits but the GOT
entry is 64 bits wide and the whole 64-bit entry is used