@@ -29,7 +29,7 @@ _dl_call_fini (void *closure_map)
_dl_debug_printf ("\ncalling fini: %s [%lu]\n\n", map->l_name, map->l_ns);
/* Make sure nothing happens if we are called twice. */
- map->l_init_called = 0;
+ map->l_rw->l_init_called = 0;
ElfW(Dyn) *fini_array = map->l_info[DT_FINI_ARRAY];
if (fini_array != NULL)
@@ -109,23 +109,23 @@ void
_dl_close_worker (struct link_map *map, bool force)
{
/* One less direct use. */
- --map->l_direct_opencount;
+ --map->l_rw->l_direct_opencount;
/* If _dl_close is called recursively (some destructor call dlclose),
just record that the parent _dl_close will need to do garbage collection
again and return. */
static enum { not_pending, pending, rerun } dl_close_state;
- if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
+ if (map->l_rw->l_direct_opencount > 0 || map->l_type != lt_loaded
|| dl_close_state != not_pending)
{
- if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
+ if (map->l_rw->l_direct_opencount == 0 && map->l_type == lt_loaded)
dl_close_state = rerun;
/* There are still references to this object. Do nothing more. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
- map->l_name, map->l_direct_opencount);
+ map->l_name, map->l_rw->l_direct_opencount);
return;
}
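Note: the tri-state recursion guard above is easier to see in isolation. A minimal standalone sketch (simplified; the real worker also checks l_type, runs under the load lock, and does the actual unloading where the elided comment sits), using the link_map/link_map_rw types from this patch:

  static enum { not_pending, pending, rerun } dl_close_state;

  static void
  close_worker_sketch (struct link_map *map)
  {
    --map->l_rw->l_direct_opencount;
    if (map->l_rw->l_direct_opencount > 0 || dl_close_state != not_pending)
      {
        /* A recursive dlclose from a destructor lands here and asks the
           outer invocation to garbage-collect again.  */
        if (map->l_rw->l_direct_opencount == 0)
          dl_close_state = rerun;
        return;
      }
    do
      {
        dl_close_state = pending;
        /* ... mark still-used maps, unload the rest ...  */
      }
    while (dl_close_state == rerun);
    dl_close_state = not_pending;
  }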
@@ -147,7 +147,7 @@ _dl_close_worker (struct link_map *map, bool force)
{
l->l_map_used = 0;
l->l_map_done = 0;
- l->l_idx = idx;
+ l->l_rw->l_idx = idx;
maps[idx] = l;
++idx;
}
@@ -157,10 +157,10 @@ _dl_close_worker (struct link_map *map, bool force)
The map variable is NULL after a retry. */
if (map != NULL)
{
- maps[map->l_idx] = maps[0];
- maps[map->l_idx]->l_idx = map->l_idx;
+ maps[map->l_rw->l_idx] = maps[0];
+ maps[map->l_rw->l_idx]->l_rw->l_idx = map->l_rw->l_idx;
maps[0] = map;
- maps[0]->l_idx = 0;
+ maps[0]->l_rw->l_idx = 0;
}
/* Keep track of the lowest index link map we have covered already. */
@@ -175,11 +175,11 @@ _dl_close_worker (struct link_map *map, bool force)
/* Check whether this object is still used. */
if (l->l_type == lt_loaded
- && l->l_direct_opencount == 0
- && !l->l_nodelete_active
+ && l->l_rw->l_direct_opencount == 0
+ && !l->l_rw->l_nodelete_active
/* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
acquire is sufficient and correct. */
- && atomic_load_acquire (&l->l_tls_dtor_count) == 0
+ && atomic_load_acquire (&l->l_rw->l_tls_dtor_count) == 0
&& !l->l_map_used)
continue;
@@ -187,7 +187,7 @@ _dl_close_worker (struct link_map *map, bool force)
l->l_map_used = 1;
l->l_map_done = 1;
/* Signal the object is still needed. */
- l->l_idx = IDX_STILL_USED;
+ l->l_rw->l_idx = IDX_STILL_USED;
/* Mark all dependencies as used. */
if (l->l_initfini != NULL)
@@ -197,9 +197,10 @@ _dl_close_worker (struct link_map *map, bool force)
struct link_map **lp = &l->l_initfini[1];
while (*lp != NULL)
{
- if ((*lp)->l_idx != IDX_STILL_USED)
+ if ((*lp)->l_rw->l_idx != IDX_STILL_USED)
{
- assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);
+ assert ((*lp)->l_rw->l_idx >= 0
+ && (*lp)->l_rw->l_idx < nloaded);
if (!(*lp)->l_map_used)
{
@@ -208,8 +209,8 @@ _dl_close_worker (struct link_map *map, bool force)
already processed it, then we need to go back
and process again from that point forward to
ensure we keep all of its dependencies also. */
- if ((*lp)->l_idx - 1 < done_index)
- done_index = (*lp)->l_idx - 1;
+ if ((*lp)->l_rw->l_idx - 1 < done_index)
+ done_index = (*lp)->l_rw->l_idx - 1;
}
}
@@ -217,20 +218,20 @@ _dl_close_worker (struct link_map *map, bool force)
}
}
/* And the same for relocation dependencies. */
- if (l->l_reldeps != NULL)
- for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
+ if (l->l_rw->l_reldeps != NULL)
+ for (unsigned int j = 0; j < l->l_rw->l_reldeps->act; ++j)
{
- struct link_map *jmap = l->l_reldeps->list[j];
+ struct link_map *jmap = l->l_rw->l_reldeps->list[j];
- if (jmap->l_idx != IDX_STILL_USED)
+ if (jmap->l_rw->l_idx != IDX_STILL_USED)
{
- assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);
+ assert (jmap->l_rw->l_idx >= 0 && jmap->l_rw->l_idx < nloaded);
if (!jmap->l_map_used)
{
jmap->l_map_used = 1;
- if (jmap->l_idx - 1 < done_index)
- done_index = jmap->l_idx - 1;
+ if (jmap->l_rw->l_idx - 1 < done_index)
+ done_index = jmap->l_rw->l_idx - 1;
}
}
}
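The done_index rewinding above amounts to a worklist-free transitive mark. A toy restatement (assumptions: maps[] is indexed by l_idx, l_initfini[0] is the map itself and is skipped, IDX_STILL_USED and l_reldeps handling omitted):

  for (int i = 0; i < nloaded; ++i)
    {
      struct link_map *l = maps[i];
      if (!l->l_map_used || l->l_initfini == NULL)
        continue;
      for (struct link_map **lp = &l->l_initfini[1]; *lp != NULL; ++lp)
        if (!(*lp)->l_map_used)
          {
            (*lp)->l_map_used = 1;
            /* Dependency already swept past: rewind so that its own
               dependencies get marked on the following iterations.  */
            if ((*lp)->l_rw->l_idx - 1 < i)
              i = (*lp)->l_rw->l_idx - 1;
          }
    }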
@@ -255,12 +256,12 @@ _dl_close_worker (struct link_map *map, bool force)
if (!imap->l_map_used)
{
- assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);
+ assert (imap->l_type == lt_loaded && !imap->l_rw->l_nodelete_active);
/* Call its termination function. Do not do it for
half-cooked objects. Temporarily disable exception
handling, so that errors are fatal. */
- if (imap->l_init_called)
+ if (imap->l_rw->l_init_called)
_dl_catch_exception (NULL, _dl_call_fini, imap);
#ifdef SHARED
@@ -327,7 +328,7 @@ _dl_close_worker (struct link_map *map, bool force)
((char *) imap->l_scope[cnt]
- offsetof (struct link_map, l_searchlist));
assert (tmap->l_ns == nsid);
- if (tmap->l_idx == IDX_STILL_USED)
+ if (tmap->l_rw->l_idx == IDX_STILL_USED)
++remain;
else
removed_any = true;
@@ -372,7 +373,7 @@ _dl_close_worker (struct link_map *map, bool force)
struct link_map *tmap = (struct link_map *)
((char *) imap->l_scope[cnt]
- offsetof (struct link_map, l_searchlist));
- if (tmap->l_idx != IDX_STILL_USED)
+ if (tmap->l_rw->l_idx != IDX_STILL_USED)
{
/* Remove the scope. Or replace with own map's
scope. */
@@ -417,7 +418,7 @@ _dl_close_worker (struct link_map *map, bool force)
/* The loader is gone, so mark the object as not having one.
Note: l_idx != IDX_STILL_USED -> object will be removed. */
if (imap->l_loader != NULL
- && imap->l_loader->l_idx != IDX_STILL_USED)
+ && imap->l_loader->l_rw->l_idx != IDX_STILL_USED)
imap->l_loader = NULL;
/* Remember where the first dynamically loaded object is. */
@@ -507,14 +508,14 @@ _dl_close_worker (struct link_map *map, bool force)
if (GL(dl_tls_dtv_slotinfo_list) != NULL
&& ! remove_slotinfo (imap->l_tls_modid,
GL(dl_tls_dtv_slotinfo_list), 0,
- imap->l_init_called))
+ imap->l_rw->l_init_called))
/* All dynamically loaded modules with TLS are unloaded. */
/* Can be read concurrently. */
atomic_store_relaxed (&GL(dl_tls_max_dtv_idx),
GL(dl_tls_static_nelem));
- if (imap->l_tls_offset != NO_TLS_OFFSET
- && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
+ if (imap->l_rw->l_tls_offset != NO_TLS_OFFSET
+ && imap->l_rw->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
{
/* Collect a contiguous chunk built from the objects in
this search list, going in either direction. When the
@@ -522,19 +523,19 @@ _dl_close_worker (struct link_map *map, bool force)
reclaim it. */
#if TLS_TCB_AT_TP
if (tls_free_start == NO_TLS_OFFSET
- || (size_t) imap->l_tls_offset == tls_free_start)
+ || (size_t) imap->l_rw->l_tls_offset == tls_free_start)
{
/* Extend the contiguous chunk being reclaimed. */
tls_free_start
- = imap->l_tls_offset - imap->l_tls_blocksize;
+ = imap->l_rw->l_tls_offset - imap->l_tls_blocksize;
if (tls_free_end == NO_TLS_OFFSET)
- tls_free_end = imap->l_tls_offset;
+ tls_free_end = imap->l_rw->l_tls_offset;
}
- else if (imap->l_tls_offset - imap->l_tls_blocksize
+ else if (imap->l_rw->l_tls_offset - imap->l_tls_blocksize
== tls_free_end)
/* Extend the chunk backwards. */
- tls_free_end = imap->l_tls_offset;
+ tls_free_end = imap->l_rw->l_tls_offset;
else
{
/* This isn't contiguous with the last chunk freed.
@@ -543,19 +544,20 @@ _dl_close_worker (struct link_map *map, bool force)
if (tls_free_end == GL(dl_tls_static_used))
{
GL(dl_tls_static_used) = tls_free_start;
- tls_free_end = imap->l_tls_offset;
+ tls_free_end = imap->l_rw->l_tls_offset;
tls_free_start
= tls_free_end - imap->l_tls_blocksize;
}
- else if ((size_t) imap->l_tls_offset
+ else if ((size_t) imap->l_rw->l_tls_offset
== GL(dl_tls_static_used))
GL(dl_tls_static_used)
- = imap->l_tls_offset - imap->l_tls_blocksize;
- else if (tls_free_end < (size_t) imap->l_tls_offset)
+ = imap->l_rw->l_tls_offset - imap->l_tls_blocksize;
+ else if (tls_free_end
+ < (size_t) imap->l_rw->l_tls_offset)
{
/* We pick the later block. It has a chance to
be freed. */
- tls_free_end = imap->l_tls_offset;
+ tls_free_end = imap->l_rw->l_tls_offset;
tls_free_start
= tls_free_end - imap->l_tls_blocksize;
}
@@ -564,34 +566,37 @@ _dl_close_worker (struct link_map *map, bool force)
if (tls_free_start == NO_TLS_OFFSET)
{
tls_free_start = imap->l_tls_firstbyte_offset;
- tls_free_end = (imap->l_tls_offset
+ tls_free_end = (imap->l_rw->l_tls_offset
+ imap->l_tls_blocksize);
}
else if (imap->l_tls_firstbyte_offset == tls_free_end)
/* Extend the contiguous chunk being reclaimed. */
- tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
- else if (imap->l_tls_offset + imap->l_tls_blocksize
+ tls_free_end = (imap->l_rw->l_tls_offset
+ + imap->l_tls_blocksize);
+ else if (imap->l_rw->l_tls_offset + imap->l_tls_blocksize
== tls_free_start)
/* Extend the chunk backwards. */
tls_free_start = imap->l_tls_firstbyte_offset;
/* This isn't contiguous with the last chunk freed.
One of them will be leaked unless we can free
one block right away. */
- else if (imap->l_tls_offset + imap->l_tls_blocksize
+ else if (imap->l_rw->l_tls_offset + imap->l_tls_blocksize
== GL(dl_tls_static_used))
GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
else if (tls_free_end == GL(dl_tls_static_used))
{
GL(dl_tls_static_used) = tls_free_start;
tls_free_start = imap->l_tls_firstbyte_offset;
- tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
+ tls_free_end = (imap->l_rw->l_tls_offset
+ + imap->l_tls_blocksize);
}
else if (tls_free_end < imap->l_tls_firstbyte_offset)
{
/* We pick the later block. It has a chance to
be freed. */
tls_free_start = imap->l_tls_firstbyte_offset;
- tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
+ tls_free_end = (imap->l_rw->l_tls_offset
+ + imap->l_tls_blocksize);
}
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
@@ -663,7 +668,8 @@ _dl_close_worker (struct link_map *map, bool force)
if (imap->l_origin != (char *) -1)
free ((char *) imap->l_origin);
- free (imap->l_reldeps);
+ free (imap->l_rw->l_reldeps);
+ free (imap->l_rw);
/* Print debugging message. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
@@ -769,7 +775,7 @@ _dl_close (void *_map)
before we took the lock. There is no way to detect this (see below)
so we proceed assuming this isn't the case. First see whether we
can remove the object at all. */
- if (__glibc_unlikely (map->l_nodelete_active))
+ if (__glibc_unlikely (map->l_rw->l_nodelete_active))
{
/* Nope. Do nothing. */
__rtld_lock_unlock_recursive (GL(dl_load_lock));
@@ -786,7 +792,7 @@ _dl_close (void *_map)
should be a detectable case and given that dlclose should be threadsafe
we need this to be a reliable detection.
This is bug 20990. */
- if (__builtin_expect (map->l_direct_opencount, 1) == 0)
+ if (__builtin_expect (map->l_rw->l_direct_opencount, 1) == 0)
{
__rtld_lock_unlock_recursive (GL(dl_load_lock));
_dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
@@ -478,20 +478,20 @@ _dl_map_object_deps (struct link_map *map,
/* Maybe we can remove some relocation dependencies now. */
struct link_map_reldeps *l_reldeps = NULL;
- if (map->l_reldeps != NULL)
+ if (map->l_rw->l_reldeps != NULL)
{
for (i = 0; i < nlist; ++i)
map->l_searchlist.r_list[i]->l_reserved = 1;
/* Avoid removing relocation dependencies of the main binary. */
map->l_reserved = 0;
- struct link_map **list = &map->l_reldeps->list[0];
- for (i = 0; i < map->l_reldeps->act; ++i)
+ struct link_map **list = &map->l_rw->l_reldeps->list[0];
+ for (i = 0; i < map->l_rw->l_reldeps->act; ++i)
if (list[i]->l_reserved)
{
/* Need to allocate new array of relocation dependencies. */
l_reldeps = malloc (sizeof (*l_reldeps)
- + map->l_reldepsmax
+ + map->l_rw->l_reldepsmax
* sizeof (struct link_map *));
if (l_reldeps == NULL)
/* Bad luck, keep the reldeps duplicated between
@@ -502,7 +502,7 @@ _dl_map_object_deps (struct link_map *map,
unsigned int j = i;
memcpy (&l_reldeps->list[0], &list[0],
i * sizeof (struct link_map *));
- for (i = i + 1; i < map->l_reldeps->act; ++i)
+ for (i = i + 1; i < map->l_rw->l_reldeps->act; ++i)
if (!list[i]->l_reserved)
l_reldeps->list[j++] = list[i];
l_reldeps->act = j;
@@ -547,8 +547,8 @@ _dl_map_object_deps (struct link_map *map,
if (l_reldeps != NULL)
{
atomic_write_barrier ();
- void *old_l_reldeps = map->l_reldeps;
- map->l_reldeps = l_reldeps;
+ void *old_l_reldeps = map->l_rw->l_reldeps;
+ map->l_rw->l_reldeps = l_reldeps;
_dl_scope_free (old_l_reldeps);
}
if (old_l_initfini != NULL)
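Both l_reldeps updates (here and in add_dependency below) follow the same publication discipline; restated with comments (atomic_write_barrier and _dl_scope_free are the glibc internals already used in the hunk; the latter defers the deallocation until concurrent readers have moved on):

  l_reldeps->act = j;               /* New list fully initialized.  */
  atomic_write_barrier ();          /* Order contents before pointer.  */
  void *old = map->l_rw->l_reldeps;
  map->l_rw->l_reldeps = l_reldeps; /* Lock-free readers switch here.  */
  _dl_scope_free (old);             /* Deferred free, not plain free().  */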
@@ -508,7 +508,7 @@ _dlfo_process_initial (void)
if (l != main_map && l == l->l_real)
{
/* lt_library link maps are implicitly NODELETE. */
- if (l->l_type == lt_library || l->l_nodelete_active)
+ if (l->l_type == lt_library || l->l_rw->l_nodelete_active)
{
if (_dlfo_nodelete_mappings != NULL)
/* Second pass only. */
@@ -78,12 +78,12 @@ _dl_fini (void)
assert (i < nloaded);
maps[i] = l;
- l->l_idx = i;
+ l->l_rw->l_idx = i;
++i;
/* Bump l_direct_opencount of all objects so that they
are not dlclose()ed from underneath us. */
- ++l->l_direct_opencount;
+ ++l->l_rw->l_direct_opencount;
}
else
/* Used below to call la_objclose for the ld.so proxy
@@ -115,7 +115,7 @@ _dl_fini (void)
{
struct link_map *l = maps[i];
- if (l->l_init_called)
+ if (l->l_rw->l_init_called)
{
_dl_call_fini (l);
#ifdef SHARED
@@ -125,7 +125,7 @@ _dl_fini (void)
}
/* Correct the previous increment. */
- --l->l_direct_opencount;
+ --l->l_rw->l_direct_opencount;
}
if (proxy_link_map != NULL)
@@ -34,13 +34,13 @@ call_init (struct link_map *l, int argc, char **argv, char **env)
need relocation.) */
assert (l->l_relocated || l->l_type == lt_executable);
- if (l->l_init_called)
+ if (l->l_rw->l_init_called)
/* This object is all done. */
return;
/* Avoid handling this constructor again in case we have a circular
dependency. */
- l->l_init_called = 1;
+ l->l_rw->l_init_called = 1;
/* Check for object which constructors we do not run here. */
if (__builtin_expect (l->l_name[0], 'a') == '\0'
@@ -175,9 +175,9 @@ static void
mark_nodelete (struct link_map *map, int flags)
{
if (flags & DL_LOOKUP_FOR_RELOCATE)
- map->l_nodelete_pending = true;
+ map->l_rw->l_nodelete_pending = true;
else
- map->l_nodelete_active = true;
+ map->l_rw->l_nodelete_active = true;
}
/* Return true if MAP is marked as NODELETE according to the lookup
@@ -187,8 +187,8 @@ is_nodelete (struct link_map *map, int flags)
{
/* Non-pending NODELETE always counts. Pending NODELETE only counts
during initial relocation processing. */
- return map->l_nodelete_active
- || ((flags & DL_LOOKUP_FOR_RELOCATE) && map->l_nodelete_pending);
+ return map->l_rw->l_nodelete_active
+ || ((flags & DL_LOOKUP_FOR_RELOCATE) && map->l_rw->l_nodelete_pending);
}
/* Utility function for do_lookup_x. Lookup an STB_GNU_UNIQUE symbol
@@ -532,7 +532,7 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
return 0;
struct link_map_reldeps *l_reldeps
- = atomic_forced_read (undef_map->l_reldeps);
+ = atomic_forced_read (undef_map->l_rw->l_reldeps);
/* Make sure l_reldeps is read before l_initfini. */
atomic_read_barrier ();
@@ -591,22 +591,22 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
/* Redo the l_reldeps check if undef_map's l_reldeps changed in
the mean time. */
- if (undef_map->l_reldeps != NULL)
+ if (undef_map->l_rw->l_reldeps != NULL)
{
- if (undef_map->l_reldeps != l_reldeps)
+ if (undef_map->l_rw->l_reldeps != l_reldeps)
{
- struct link_map **list = &undef_map->l_reldeps->list[0];
- l_reldepsact = undef_map->l_reldeps->act;
+ struct link_map **list = &undef_map->l_rw->l_reldeps->list[0];
+ l_reldepsact = undef_map->l_rw->l_reldeps->act;
for (i = 0; i < l_reldepsact; ++i)
if (list[i] == map)
goto out_check;
}
- else if (undef_map->l_reldeps->act > l_reldepsact)
+ else if (undef_map->l_rw->l_reldeps->act > l_reldepsact)
{
struct link_map **list
- = &undef_map->l_reldeps->list[0];
+ = &undef_map->l_rw->l_reldeps->list[0];
i = l_reldepsact;
- l_reldepsact = undef_map->l_reldeps->act;
+ l_reldepsact = undef_map->l_rw->l_reldeps->act;
for (; i < l_reldepsact; ++i)
if (list[i] == map)
goto out_check;
@@ -662,14 +662,14 @@ marking %s [%lu] as NODELETE due to reference from %s [%lu]\n",
}
/* Add the reference now. */
- if (__glibc_unlikely (l_reldepsact >= undef_map->l_reldepsmax))
+ if (__glibc_unlikely (l_reldepsact >= undef_map->l_rw->l_reldepsmax))
{
/* Allocate more memory for the dependency list. Since this
can never happen during the startup phase we can use
`realloc'. */
struct link_map_reldeps *newp;
- unsigned int max
- = undef_map->l_reldepsmax ? undef_map->l_reldepsmax * 2 : 10;
+ unsigned int max = (undef_map->l_rw->l_reldepsmax
+ ? undef_map->l_rw->l_reldepsmax * 2 : 10);
#ifdef RTLD_PREPARE_FOREIGN_CALL
RTLD_PREPARE_FOREIGN_CALL;
@@ -696,23 +696,23 @@ marking %s [%lu] as NODELETE due to memory allocation failure\n",
else
{
if (l_reldepsact)
- memcpy (&newp->list[0], &undef_map->l_reldeps->list[0],
+ memcpy (&newp->list[0], &undef_map->l_rw->l_reldeps->list[0],
l_reldepsact * sizeof (struct link_map *));
newp->list[l_reldepsact] = map;
newp->act = l_reldepsact + 1;
atomic_write_barrier ();
- void *old = undef_map->l_reldeps;
- undef_map->l_reldeps = newp;
- undef_map->l_reldepsmax = max;
+ void *old = undef_map->l_rw->l_reldeps;
+ undef_map->l_rw->l_reldeps = newp;
+ undef_map->l_rw->l_reldepsmax = max;
if (old)
_dl_scope_free (old);
}
}
else
{
- undef_map->l_reldeps->list[l_reldepsact] = map;
+ undef_map->l_rw->l_reldeps->list[l_reldepsact] = map;
atomic_write_barrier ();
- undef_map->l_reldeps->act = l_reldepsact + 1;
+ undef_map->l_rw->l_reldeps->act = l_reldepsact + 1;
}
/* Display information if we are debugging. */
@@ -89,15 +89,20 @@ _dl_new_object (char *realname, const char *libname, int type,
# define audit_space 0
#endif
- new = (struct link_map *) calloc (sizeof (*new) + audit_space
- + sizeof (struct link_map *)
- + sizeof (*newname) + libname_len, 1);
+ new = calloc (sizeof (*new)
+ + sizeof (struct link_map_private *)
+ + sizeof (*newname) + libname_len, 1);
if (new == NULL)
return NULL;
+ new->l_rw = calloc (1, sizeof (*new->l_rw) + audit_space);
+ if (new->l_rw == NULL)
+ {
+ free (new);
+ return NULL;
+ }
new->l_real = new;
- new->l_symbolic_searchlist.r_list = (struct link_map **) ((char *) (new + 1)
- + audit_space);
+ new->l_symbolic_searchlist.r_list = (struct link_map **) ((char *) (new + 1));
new->l_libname = newname
= (struct libname_list *) (new->l_symbolic_searchlist.r_list + 1);
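The allocation now happens in two blocks; a condensed sketch of the failure handling (hypothetical helper; the libname_list/name-copy bookkeeping is elided):

  static struct link_map *
  new_map_sketch (size_t audit_space, size_t libname_len)
  {
    /* Block 1: the link map proper plus trailing searchlist pointer
       and name data.  */
    struct link_map *new = calloc (sizeof (*new)
                                   + sizeof (struct link_map *)
                                   + libname_len, 1);
    if (new == NULL)
      return NULL;
    /* Block 2: the read-write part, followed by the audit state.  */
    new->l_rw = calloc (1, sizeof (*new->l_rw) + audit_space);
    if (new->l_rw == NULL)
      {
        /* Do not leak block 1 on partial failure.  */
        free (new);
        return NULL;
      }
    return new;
  }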
@@ -131,7 +136,7 @@ _dl_new_object (char *realname, const char *libname, int type,
new->l_used = 1;
new->l_loader = loader;
#if NO_TLS_OFFSET != 0
- new->l_tls_offset = NO_TLS_OFFSET;
+ new->l_rw->l_tls_offset = NO_TLS_OFFSET;
#endif
new->l_ns = nsid;
@@ -261,7 +261,7 @@ resize_scopes (struct link_map *new)
/* If the initializer has been called already, the object has
not been loaded here and now. */
- if (imap->l_init_called && imap->l_type == lt_loaded)
+ if (imap->l_rw->l_init_called && imap->l_type == lt_loaded)
{
if (scope_has_map (imap, new))
/* Avoid duplicates. */
@@ -325,7 +325,7 @@ update_scopes (struct link_map *new)
struct link_map *imap = new->l_searchlist.r_list[i];
int from_scope = 0;
- if (imap->l_init_called && imap->l_type == lt_loaded)
+ if (imap->l_rw->l_init_called && imap->l_type == lt_loaded)
{
if (scope_has_map (imap, new))
/* Avoid duplicates. */
@@ -424,7 +424,7 @@ activate_nodelete (struct link_map *new)
NODELETE status for objects outside the local scope. */
for (struct link_map *l = GL (dl_ns)[new->l_ns]._ns_loaded; l != NULL;
l = l->l_next)
- if (l->l_nodelete_pending)
+ if (l->l_rw->l_nodelete_pending)
{
if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("activating NODELETE for %s [%lu]\n",
@@ -433,11 +433,11 @@ activate_nodelete (struct link_map *new)
/* The flag can already be true at this point, e.g. a signal
handler may have triggered lazy binding and set NODELETE
status immediately. */
- l->l_nodelete_active = true;
+ l->l_rw->l_nodelete_active = true;
/* This is just a debugging aid, to indicate that
activate_nodelete has run for this map. */
- l->l_nodelete_pending = false;
+ l->l_rw->l_nodelete_pending = false;
}
}
@@ -476,7 +476,7 @@ _dl_open_relocate_one_object (struct dl_open_args *args, struct r_debug *r,
_dl_start_profile ();
/* Prevent unloading the object. */
- GL(dl_profile_map)->l_nodelete_active = true;
+ GL(dl_profile_map)->l_rw->l_nodelete_active = true;
}
}
else
@@ -505,7 +505,7 @@ is_already_fully_open (struct link_map *map, int mode)
/* The object is already in the global scope if requested. */
&& (!(mode & RTLD_GLOBAL) || map->l_global)
/* The object is already NODELETE if requested. */
- && (!(mode & RTLD_NODELETE) || map->l_nodelete_active));
+ && (!(mode & RTLD_NODELETE) || map->l_rw->l_nodelete_active));
}
static void
@@ -547,7 +547,7 @@ dl_open_worker_begin (void *a)
return;
/* This object is directly loaded. */
- ++new->l_direct_opencount;
+ ++new->l_rw->l_direct_opencount;
/* It was already open. See is_already_fully_open above. */
if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
@@ -555,7 +555,8 @@ dl_open_worker_begin (void *a)
/* Let the user know about the opencount. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
- new->l_name, new->l_ns, new->l_direct_opencount);
+ new->l_name, new->l_ns,
+ new->l_rw->l_direct_opencount);
#ifdef SHARED
/* No relocation processing on this execution path. But
@@ -576,10 +577,10 @@ dl_open_worker_begin (void *a)
if (__glibc_unlikely (mode & RTLD_NODELETE))
{
if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES)
- && !new->l_nodelete_active)
+ && !new->l_rw->l_nodelete_active)
_dl_debug_printf ("marking %s [%lu] as NODELETE\n",
new->l_name, new->l_ns);
- new->l_nodelete_active = true;
+ new->l_rw->l_nodelete_active = true;
}
/* Finalize the addition to the global scope. */
@@ -592,7 +593,7 @@ dl_open_worker_begin (void *a)
/* Schedule NODELETE marking for the directly loaded object if
requested. */
if (__glibc_unlikely (mode & RTLD_NODELETE))
- new->l_nodelete_pending = true;
+ new->l_rw->l_nodelete_pending = true;
/* Load that object's dependencies. */
_dl_map_object_deps (new, NULL, 0, 0,
@@ -795,7 +796,7 @@ dl_open_worker (void *a)
/* Let the user know about the opencount. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
- new->l_name, new->l_ns, new->l_direct_opencount);
+ new->l_name, new->l_ns, new->l_rw->l_direct_opencount);
}
void *
@@ -881,7 +882,7 @@ no more namespaces available for dlmopen()"));
if (is_already_fully_open (args.map, mode))
{
/* We can use the fast path. */
- ++args.map->l_direct_opencount;
+ ++args.map->l_rw->l_direct_opencount;
__rtld_lock_unlock_recursive (GL(dl_load_lock));
return args.map;
}
@@ -41,7 +41,7 @@
dynamically loaded. This can only work if there is enough surplus in
the static TLS area already allocated for each running thread. If this
object's TLS segment is too big to fit, we fail with -1. If it fits,
- we set MAP->l_tls_offset and return 0.
+ we set MAP->l_rw->l_tls_offset and return 0.
A portion of the surplus static TLS can be optionally used to optimize
dynamic TLS access (with TLSDESC or powerpc TLS optimizations).
If OPTIONAL is true then TLS is allocated for such optimization and
@@ -53,7 +53,7 @@ _dl_try_allocate_static_tls (struct link_map *map, bool optional)
{
/* If we've already used the variable with dynamic access, or if the
alignment requirements are too high, fail. */
- if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
+ if (map->l_rw->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
|| map->l_tls_align > GLRO (dl_tls_static_align))
{
fail:
@@ -81,7 +81,7 @@ _dl_try_allocate_static_tls (struct link_map *map, bool optional)
size_t offset = GL(dl_tls_static_used) + use;
- map->l_tls_offset = GL(dl_tls_static_used) = offset;
+ map->l_rw->l_tls_offset = GL(dl_tls_static_used) = offset;
#elif TLS_DTV_AT_TP
/* dl_tls_static_used includes the TCB at the beginning. */
size_t offset = (ALIGN_UP(GL(dl_tls_static_used)
@@ -100,7 +100,7 @@ _dl_try_allocate_static_tls (struct link_map *map, bool optional)
else if (optional)
GL(dl_tls_static_optional) -= use;
- map->l_tls_offset = offset;
+ map->l_rw->l_tls_offset = offset;
map->l_tls_firstbyte_offset = GL(dl_tls_static_used);
GL(dl_tls_static_used) = used;
#else
@@ -134,7 +134,7 @@ void
__attribute_noinline__
_dl_allocate_static_tls (struct link_map *map)
{
- if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
+ if (map->l_rw->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
|| _dl_try_allocate_static_tls (map, false))
{
_dl_signal_error (0, map->l_name, NULL, N_("\
@@ -150,9 +150,10 @@ void
_dl_nothread_init_static_tls (struct link_map *map)
{
#if TLS_TCB_AT_TP
- void *dest = (char *) THREAD_SELF - map->l_tls_offset;
+ void *dest = (char *) THREAD_SELF - map->l_rw->l_tls_offset;
#elif TLS_DTV_AT_TP
- void *dest = (char *) THREAD_SELF + map->l_tls_offset + TLS_PRE_TCB_SIZE;
+ void *dest = ((char *) THREAD_SELF + map->l_rw->l_tls_offset
+ + TLS_PRE_TCB_SIZE);
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
@@ -51,7 +51,7 @@ _dl_sort_maps_original (struct link_map **maps, unsigned int nmaps,
{
/* Do not handle ld.so in secondary namespaces and objects which
are not removed. */
- if (thisp != thisp->l_real || thisp->l_idx == -1)
+ if (thisp != thisp->l_real || thisp->l_rw->l_idx == -1)
goto skip;
}
@@ -87,10 +87,10 @@ _dl_sort_maps_original (struct link_map **maps, unsigned int nmaps,
goto next;
}
- if (__glibc_unlikely (for_fini && maps[k]->l_reldeps != NULL))
+ if (__glibc_unlikely (for_fini && maps[k]->l_rw->l_reldeps != NULL))
{
- unsigned int m = maps[k]->l_reldeps->act;
- struct link_map **relmaps = &maps[k]->l_reldeps->list[0];
+ unsigned int m = maps[k]->l_rw->l_reldeps->act;
+ struct link_map **relmaps = &maps[k]->l_rw->l_reldeps->list[0];
/* Look through the relocation dependencies of the object. */
while (m-- > 0)
@@ -137,32 +137,32 @@ dfs_traversal (struct link_map ***rpo, struct link_map *map,
{
/* _dl_map_object_deps ignores l_faked objects when calculating the
number of maps before calling _dl_sort_maps, ignore them as well. */
- if (map->l_visited || map->l_faked)
+ if (map->l_rw->l_visited || map->l_faked)
return;
- map->l_visited = 1;
+ map->l_rw->l_visited = 1;
if (map->l_initfini)
{
for (int i = 0; map->l_initfini[i] != NULL; i++)
{
struct link_map *dep = map->l_initfini[i];
- if (dep->l_visited == 0
+ if (dep->l_rw->l_visited == 0
&& dep->l_main_map == 0)
dfs_traversal (rpo, dep, do_reldeps);
}
}
- if (__glibc_unlikely (do_reldeps != NULL && map->l_reldeps != NULL))
+ if (__glibc_unlikely (do_reldeps != NULL && map->l_rw->l_reldeps != NULL))
{
/* Indicate that we encountered relocation dependencies during
traversal. */
*do_reldeps = true;
- for (int m = map->l_reldeps->act - 1; m >= 0; m--)
+ for (int m = map->l_rw->l_reldeps->act - 1; m >= 0; m--)
{
- struct link_map *dep = map->l_reldeps->list[m];
- if (dep->l_visited == 0
+ struct link_map *dep = map->l_rw->l_reldeps->list[m];
+ if (dep->l_rw->l_visited == 0
&& dep->l_main_map == 0)
dfs_traversal (rpo, dep, do_reldeps);
}
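For reference, the emission step of dfs_traversal is not visible in the hunk; a sketch of the whole routine under the same assumptions (each node is emitted after its subtree, walking *rpo backward from the end, so reading the array forward gives the reverse-postorder sequence the surrounding code expects):

  static void
  dfs_sketch (struct link_map ***rpo, struct link_map *map)
  {
    if (map->l_rw->l_visited || map->l_faked)
      return;
    map->l_rw->l_visited = 1;
    if (map->l_initfini != NULL)
      for (int i = 0; map->l_initfini[i] != NULL; i++)
        dfs_sketch (rpo, map->l_initfini[i]);
    /* Emit after the subtree: *rpo walks backward from the end.  */
    *--(*rpo) = map;
  }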
@@ -181,7 +181,7 @@ _dl_sort_maps_dfs (struct link_map **maps, unsigned int nmaps,
{
struct link_map *first_map = maps[0];
for (int i = nmaps - 1; i >= 0; i--)
- maps[i]->l_visited = 0;
+ maps[i]->l_rw->l_visited = 0;
/* We apply DFS traversal for each of maps[i] until the whole total order
is found and we're at the start of the Reverse-Postorder (RPO) sequence,
@@ -244,7 +244,7 @@ _dl_sort_maps_dfs (struct link_map **maps, unsigned int nmaps,
if (do_reldeps)
{
for (int i = nmaps - 1; i >= 0; i--)
- rpo[i]->l_visited = 0;
+ rpo[i]->l_rw->l_visited = 0;
struct link_map **maps_head = &maps[nmaps];
for (int i = nmaps - 1; i >= 0; i--)
@@ -29,8 +29,8 @@
can't be done, we fall back to the error that DF_STATIC_TLS is
intended to produce. */
#define HAVE_STATIC_TLS(map, sym_map) \
- (__builtin_expect ((sym_map)->l_tls_offset != NO_TLS_OFFSET \
- && ((sym_map)->l_tls_offset \
+ (__builtin_expect ((sym_map)->l_rw->l_tls_offset != NO_TLS_OFFSET \
+ && ((sym_map)->l_rw->l_tls_offset \
!= FORCED_DYNAMIC_TLS_OFFSET), 1))
#define CHECK_STATIC_TLS(map, sym_map) \
@@ -40,9 +40,9 @@
} while (0)
#define TRY_STATIC_TLS(map, sym_map) \
- (__builtin_expect ((sym_map)->l_tls_offset \
+ (__builtin_expect ((sym_map)->l_rw->l_tls_offset \
!= FORCED_DYNAMIC_TLS_OFFSET, 1) \
- && (__builtin_expect ((sym_map)->l_tls_offset != NO_TLS_OFFSET, 1) \
+ && (__builtin_expect ((sym_map)->l_rw->l_tls_offset != NO_TLS_OFFSET, 1)\
|| _dl_try_allocate_static_tls (sym_map, true) == 0))
int _dl_try_allocate_static_tls (struct link_map *map, bool optional)
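The macros above consume a tri-state l_tls_offset; restated as a predicate (values per the NO_TLS_OFFSET/FORCED_DYNAMIC_TLS_OFFSET definitions that move into struct link_map_rw below):

  /* NO_TLS_OFFSET: undecided, static TLS may still be tried.
     FORCED_DYNAMIC_TLS_OFFSET: committed to fully dynamic TLS.
     Anything else: offset of the block within static TLS.  */
  static inline bool
  have_static_tls_sketch (const struct link_map *m)
  {
    return (m->l_rw->l_tls_offset != NO_TLS_OFFSET
            && m->l_rw->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET);
  }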
@@ -82,6 +82,7 @@ int _dl_bind_not;
static struct link_map _dl_main_map =
{
.l_name = (char *) "",
+ .l_rw = &(struct link_map_rw) { .l_tls_offset = NO_TLS_OFFSET, },
.l_real = &_dl_main_map,
.l_ns = LM_ID_BASE,
.l_libname = &(struct libname_list) { .name = "", .dont_free = 1 },
@@ -98,7 +99,6 @@ static struct link_map _dl_main_map =
.l_scope = _dl_main_map.l_scope_mem,
.l_local_scope = { &_dl_main_map.l_searchlist },
.l_used = 1,
- .l_tls_offset = NO_TLS_OFFSET,
.l_serial = 1,
};
@@ -299,7 +299,7 @@ _dl_determine_tlsoffset (void)
/* XXX For some architectures we perhaps should store the
negative offset. */
- l->l_tls_offset = off;
+ l->l_rw->l_tls_offset = off;
continue;
}
}
@@ -316,7 +316,7 @@ _dl_determine_tlsoffset (void)
/* XXX For some architectures we perhaps should store the
negative offset. */
- l->l_tls_offset = off;
+ l->l_rw->l_tls_offset = off;
}
/* Insert the extra TLS block after the last TLS block. */
@@ -378,9 +378,9 @@ _dl_determine_tlsoffset (void)
off += l->l_tls_align;
if (off + l->l_tls_blocksize - firstbyte <= freetop)
{
- l->l_tls_offset = off - firstbyte;
+ l->l_rw->l_tls_offset = off - firstbyte;
freebottom = (off + l->l_tls_blocksize
- firstbyte);
continue;
}
}
@@ -389,7 +389,7 @@ _dl_determine_tlsoffset (void)
if (off - offset < firstbyte)
off += l->l_tls_align;
- l->l_tls_offset = off - firstbyte;
+ l->l_rw->l_tls_offset = off - firstbyte;
if (off - firstbyte - offset > freetop - freebottom)
{
freebottom = offset;
@@ -645,17 +645,17 @@ _dl_allocate_tls_init (void *result, bool main_thread)
dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
dtv[map->l_tls_modid].pointer.to_free = NULL;
- if (map->l_tls_offset == NO_TLS_OFFSET
- || map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET)
+ if (map->l_rw->l_tls_offset == NO_TLS_OFFSET
+ || map->l_rw->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET)
continue;
assert (map->l_tls_modid == total + cnt);
assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
#if TLS_TCB_AT_TP
- assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
- dest = (char *) result - map->l_tls_offset;
+ assert ((size_t) map->l_rw->l_tls_offset >= map->l_tls_blocksize);
+ dest = (char *) result - map->l_rw->l_tls_offset;
#elif TLS_DTV_AT_TP
- dest = (char *) result + map->l_tls_offset;
+ dest = (char *) result + map->l_rw->l_tls_offset;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
@@ -959,22 +959,23 @@ tls_get_addr_tail (tls_index *ti, dtv_t *dtv, struct link_map *the_map)
variable into static storage, we'll wait until the address in the
static TLS block is set up, and use that. If we're undecided
yet, make sure we make the decision holding the lock as well. */
- if (__glibc_unlikely (the_map->l_tls_offset
+ if (__glibc_unlikely (the_map->l_rw->l_tls_offset
!= FORCED_DYNAMIC_TLS_OFFSET))
{
__rtld_lock_lock_recursive (GL(dl_load_tls_lock));
- if (__glibc_likely (the_map->l_tls_offset == NO_TLS_OFFSET))
+ if (__glibc_likely (the_map->l_rw->l_tls_offset == NO_TLS_OFFSET))
{
- the_map->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
+ the_map->l_rw->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
__rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
}
- else if (__glibc_likely (the_map->l_tls_offset
+ else if (__glibc_likely (the_map->l_rw->l_tls_offset
!= FORCED_DYNAMIC_TLS_OFFSET))
{
#if TLS_TCB_AT_TP
- void *p = (char *) THREAD_SELF - the_map->l_tls_offset;
+ void *p = (char *) THREAD_SELF - the_map->l_rw->l_tls_offset;
#elif TLS_DTV_AT_TP
- void *p = (char *) THREAD_SELF + the_map->l_tls_offset + TLS_PRE_TCB_SIZE;
+ void *p = ((char *) THREAD_SELF + the_map->l_rw->l_tls_offset
+ + TLS_PRE_TCB_SIZE);
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
@@ -1223,9 +1224,9 @@ static inline void __attribute__((always_inline))
init_one_static_tls (struct pthread *curp, struct link_map *map)
{
# if TLS_TCB_AT_TP
- void *dest = (char *) curp - map->l_tls_offset;
+ void *dest = (char *) curp - map->l_rw->l_tls_offset;
# elif TLS_DTV_AT_TP
- void *dest = (char *) curp + map->l_tls_offset + TLS_PRE_TCB_SIZE;
+ void *dest = (char *) curp + map->l_rw->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
@@ -163,7 +163,7 @@ elf_get_dynamic_info (struct link_map *l, bool bootstrap,
{
l->l_flags_1 = info[VERSYMIDX (DT_FLAGS_1)]->d_un.d_val;
if (l->l_flags_1 & DF_1_NODELETE)
- l->l_nodelete_pending = true;
+ l->l_rw->l_nodelete_pending = true;
/* Only DT_1_SUPPORTED_MASK bits are supported, and we would like
to assert this, but we can't. Users have been setting
@@ -78,7 +78,7 @@ static const struct
for (map = MAPS; map != NULL; map = map->l_next) \
if (map->l_type == lt_loaded) \
printf ("name = \"%s\", direct_opencount = %d\n", \
- map->l_name, (int) map->l_direct_opencount); \
+ map->l_name, (int) map->l_rw->l_direct_opencount); \
fflush (stdout); \
} \
while (0)
@@ -190,7 +190,7 @@ main (int argc, char *argv[])
if (map->l_type == lt_loaded)
{
printf ("name = \"%s\", direct_opencount = %d\n",
- map->l_name, (int) map->l_direct_opencount);
+ map->l_name, (int) map->l_rw->l_direct_opencount);
result = 1;
}
@@ -29,7 +29,8 @@ check_loaded_objects (const char **loaded)
for (lm = MAPS; lm; lm = lm->l_next)
{
if (lm->l_name && lm->l_name[0])
- printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
+ printf(" %s, count = %d\n", lm->l_name,
+ (int) lm->l_rw->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_name)
{
int match = 0;
@@ -29,7 +29,8 @@ check_loaded_objects (const char **loaded)
for (lm = MAPS; lm; lm = lm->l_next)
{
if (lm->l_name && lm->l_name[0])
- printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
+ printf(" %s, count = %d\n", lm->l_name,
+ (int) lm->l_rw->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_name)
{
int match = 0;
@@ -29,7 +29,8 @@ check_loaded_objects (const char **loaded)
for (lm = MAPS; lm; lm = lm->l_next)
{
if (lm->l_name && lm->l_name[0])
- printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
+ printf(" %s, count = %d\n", lm->l_name,
+ (int) lm->l_rw->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_name)
{
int match = 0;
@@ -29,7 +29,8 @@ check_loaded_objects (const char **loaded)
for (lm = MAPS; lm; lm = lm->l_next)
{
if (lm->l_name && lm->l_name[0])
- printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
+ printf(" %s, count = %d\n", lm->l_name,
+ (int) lm->l_rw->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_name)
{
int match = 0;
@@ -460,6 +460,17 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
interfere with __rtld_static_init. */
GLRO (dl_find_object) = &_dl_find_object;
+ /* Pre-allocated read-write status of the ld.so link map. */
+ static struct
+ {
+ struct link_map_rw l;
+ struct auditstate _dl_rtld_auditstate[DL_NNS];
+ } rtld_map_rw;
+ _dl_rtld_map.l_rw = &rtld_map_rw.l;
+#if NO_TLS_OFFSET != 0
+ _dl_rtld_map.l_rw->l_tls_offset = NO_TLS_OFFSET;
+#endif
+
/* If it hasn't happened yet, record the startup time. */
rtld_timer_start (&start_time);
#if !defined DONT_USE_BOOTSTRAP_MAP
@@ -482,7 +493,7 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
/* Copy the TLS related data if necessary. */
#ifndef DONT_USE_BOOTSTRAP_MAP
# if NO_TLS_OFFSET != 0
- _dl_rtld_map.l_tls_offset = NO_TLS_OFFSET;
+ _dl_rtld_map.l_rw->l_tls_offset = NO_TLS_OFFSET;
# endif
#endif
@@ -549,10 +560,6 @@ _dl_start (void *arg)
bootstrap_map.l_ld_readonly = DL_RO_DYN_SECTION;
elf_get_dynamic_info (&bootstrap_map, true, false);
-#if NO_TLS_OFFSET != 0
- bootstrap_map.l_tls_offset = NO_TLS_OFFSET;
-#endif
-
#ifdef ELF_MACHINE_BEFORE_RTLD_RELOC
ELF_MACHINE_BEFORE_RTLD_RELOC (&bootstrap_map, bootstrap_map.l_info);
#endif
@@ -1100,7 +1107,7 @@ rtld_setup_main_map (struct link_map *main_map)
/* Perhaps the executable has no PT_LOAD header entries at all. */
main_map->l_map_start = ~0;
/* And it was opened directly. */
- ++main_map->l_direct_opencount;
+ ++main_map->l_rw->l_direct_opencount;
main_map->l_contiguous = 1;
/* A PT_LOAD segment at an unexpected address will clear the
@@ -34,7 +34,8 @@ do_test (void)
printf ("thread variable address: %p\n", &thread_var);
printf ("thread pointer address: %p\n", __thread_pointer ());
printf ("pthread_self address: %p\n", (void *) pthread_self ());
- ptrdiff_t block_offset = ((struct link_map *) _r_debug.r_map)->l_tls_offset;
+ ptrdiff_t block_offset
+ = ((struct link_map *) _r_debug.r_map)->l_rw->l_tls_offset;
printf ("main program TLS block offset: %td\n", block_offset);
if ((uintptr_t) &thread_var < (uintptr_t) THREAD_SELF)
@@ -15,7 +15,7 @@
for (map = MAPS; map != NULL; map = map->l_next) \
if (map->l_type == lt_loaded) \
printf ("name = \"%s\", direct_opencount = %d\n", \
- map->l_name, (int) map->l_direct_opencount); \
+ map->l_name, (int) map->l_rw->l_direct_opencount); \
fflush (stdout)
typedef struct
@@ -12,7 +12,7 @@
for (map = MAPS; map != NULL; map = map->l_next) \
if (map->l_type == lt_loaded) \
printf ("name = \"%s\", direct_opencount = %d\n", \
- map->l_name, (int) map->l_direct_opencount); \
+ map->l_name, (int) map->l_rw->l_direct_opencount); \
fflush (stdout)
int
@@ -217,9 +217,10 @@ __pthread_init_static_tls (struct link_map *map)
continue;
# if TLS_TCB_AT_TP
- void *dest = (char *) t->tcb - map->l_tls_offset;
+ void *dest = (char *) t->tcb - map->l_rw->l_tls_offset;
# elif TLS_DTV_AT_TP
- void *dest = (char *) t->tcb + map->l_tls_offset + TLS_PRE_TCB_SIZE;
+ void *dest = ((char *) t->tcb + map->l_rw->l_tls_offset
+ + TLS_PRE_TCB_SIZE);
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
@@ -83,6 +83,71 @@ struct r_search_path_struct
extern struct r_search_path_struct __rtld_search_dirs attribute_hidden;
extern struct r_search_path_struct __rtld_env_path_list attribute_hidden;
+
+/* Link map attributes that are always readable and writable. */
+struct link_map_rw
+{
+ /* List of the dependencies introduced through symbol binding. */
+ struct link_map_reldeps
+ {
+ unsigned int act;
+ struct link_map *list[];
+ } *l_reldeps;
+ unsigned int l_reldepsmax;
+
+ /* Reference count for dlopen/dlclose. */
+ unsigned int l_direct_opencount;
+
+ /* For objects present at startup time: offset in the static TLS
+ block. For loaded objects, it can be NO_TLS_OFFSET (not yet
+ initialized), FORCED_DYNAMIC_TLS_OFFSET (if fully dynamic TLS is
+ used), or an actual TLS offset (if the static TLS allocation has
+ been re-used to satisfy dynamic TLS needs).
+
+ This field is written outside the general loader lock, so it has
+ to reside in the read-write portion of the link map. */
+#ifndef NO_TLS_OFFSET
+# define NO_TLS_OFFSET 0
+#endif
+#ifndef FORCED_DYNAMIC_TLS_OFFSET
+# if NO_TLS_OFFSET == 0
+# define FORCED_DYNAMIC_TLS_OFFSET -1
+# elif NO_TLS_OFFSET == -1
+# define FORCED_DYNAMIC_TLS_OFFSET -2
+# else
+# error "FORCED_DYNAMIC_TLS_OFFSET is not defined"
+# endif
+#endif
+ ptrdiff_t l_tls_offset;
+
+ /* Number of thread_local objects constructed by this DSO. This is
+ atomically accessed and modified and is not always protected by the load
+ lock. See also: CONCURRENCY NOTES in cxa_thread_atexit_impl.c. */
+ size_t l_tls_dtor_count;
+
+ /* True if ELF constructors have been called. */
+ bool l_init_called;
+
+ /* NODELETE status of the map. Only valid for maps of type
+ lt_loaded. Lazy binding sets l_nodelete_active directly,
+ potentially from signal handlers. Initial loading of a
+ DF_1_NODELETE object sets l_nodelete_pending. Relocation may
+ set l_nodelete_pending as well. l_nodelete_pending maps are
+ promoted to l_nodelete_active status in the final stages of
+ dlopen, prior to calling ELF constructors. dlclose only
+ refuses to unload l_nodelete_active maps, the pending status is
+ ignored. */
+ bool l_nodelete_active;
+ bool l_nodelete_pending;
+
+ /* Used for dependency sorting in dlclose/_dl_fini. These need to
+ be writable all the time because there is no way to report an
+ error in _dl_fini. These flags can be moved into struct
+ link_map_private once _dl_fini no longer re-sorts link maps. */
+ bool l_visited;
+ int l_idx;
+};
+
/* Structure describing a loaded shared object. The `l_next' and `l_prev'
members form a chain of all the shared objects loaded at startup.
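A sketch of the pending-to-active promotion described in the struct link_map_rw comment above (this is essentially what the activate_nodelete loop, shown earlier, does per map):

  static void
  promote_nodelete_sketch (struct link_map *l)
  {
    if (l->l_rw->l_nodelete_pending)
      {
        /* May already be active, e.g. if lazy binding from a signal
           handler set it directly.  */
        l->l_rw->l_nodelete_active = true;
        /* Clearing the pending flag is only a debugging aid.  */
        l->l_rw->l_nodelete_pending = false;
      }
  }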
@@ -111,6 +176,9 @@ struct link_map
than one namespace. */
struct link_map *l_real;
+ /* Run-time writable fields. */
+ struct link_map_rw *l_rw;
+
/* Number of the namespace this link map belongs to. */
Lmid_t l_ns;
@@ -170,7 +238,6 @@ struct link_map
const Elf_Symndx *l_buckets;
};
- unsigned int l_direct_opencount; /* Reference count for dlopen/dlclose. */
enum /* Where this object came from. */
{
lt_executable, /* The main executable program. */
@@ -180,12 +247,9 @@ struct link_map
unsigned int l_dt_relr_ref:1; /* Nonzero if GLIBC_ABI_DT_RELR is
referenced. */
unsigned int l_relocated:1; /* Nonzero if object's relocations done. */
- unsigned int l_init_called:1; /* Nonzero if DT_INIT function called. */
unsigned int l_global:1; /* Nonzero if object in _dl_global_scope. */
unsigned int l_reserved:2; /* Reserved for internal use. */
unsigned int l_main_map:1; /* Nonzero for the map of the main program. */
- unsigned int l_visited:1; /* Used internally for map dependency
- graph traversal. */
unsigned int l_map_used:1; /* These two bits are used during traversal */
unsigned int l_map_done:1; /* of maps in _dl_close_worker. */
unsigned int l_phdr_allocated:1; /* Nonzero if the data structure pointed
@@ -214,18 +278,6 @@ struct link_map
lt_library map. */
unsigned int l_tls_in_slotinfo:1; /* TLS slotinfo updated in dlopen. */
- /* NODELETE status of the map. Only valid for maps of type
- lt_loaded. Lazy binding sets l_nodelete_active directly,
- potentially from signal handlers. Initial loading of an
- DF_1_NODELETE object set l_nodelete_pending. Relocation may
- set l_nodelete_pending as well. l_nodelete_pending maps are
- promoted to l_nodelete_active status in the final stages of
- dlopen, prior to calling ELF constructors. dlclose only
- refuses to unload l_nodelete_active maps, the pending status is
- ignored. */
- bool l_nodelete_active;
- bool l_nodelete_pending;
-
#include <link_map.h>
/* Collected information about own RPATH directories. */
@@ -277,14 +329,6 @@ struct link_map
/* List of object in order of the init and fini calls. */
struct link_map **l_initfini;
- /* List of the dependencies introduced through symbol binding. */
- struct link_map_reldeps
- {
- unsigned int act;
- struct link_map *list[];
- } *l_reldeps;
- unsigned int l_reldepsmax;
-
/* Nonzero if the DSO is used. */
unsigned int l_used;
@@ -293,9 +337,6 @@ struct link_map
ElfW(Word) l_flags_1;
ElfW(Word) l_flags;
- /* Temporarily used in `dl_close'. */
- int l_idx;
-
struct link_map_machine l_mach;
struct
@@ -318,28 +359,9 @@ struct link_map
size_t l_tls_align;
/* Offset of first byte module alignment. */
size_t l_tls_firstbyte_offset;
-#ifndef NO_TLS_OFFSET
-# define NO_TLS_OFFSET 0
-#endif
-#ifndef FORCED_DYNAMIC_TLS_OFFSET
-# if NO_TLS_OFFSET == 0
-# define FORCED_DYNAMIC_TLS_OFFSET -1
-# elif NO_TLS_OFFSET == -1
-# define FORCED_DYNAMIC_TLS_OFFSET -2
-# else
-# error "FORCED_DYNAMIC_TLS_OFFSET is not defined"
-# endif
-#endif
- /* For objects present at startup time: offset in the static TLS block. */
- ptrdiff_t l_tls_offset;
/* Index of the module in the dtv array. */
size_t l_tls_modid;
- /* Number of thread_local objects constructed by this DSO. This is
- atomically accessed and modified and is not always protected by the load
- lock. See also: CONCURRENCY NOTES in cxa_thread_atexit_impl.c. */
- size_t l_tls_dtor_count;
-
/* Information used to change permission after the relocations are
done. */
ElfW(Addr) l_relro_addr;
@@ -350,15 +372,16 @@ struct link_map
#include <dl-relocate-ld.h>
-/* Information used by audit modules. For most link maps, this data
- immediately follows the link map in memory. For the dynamic linker,
- it is allocated separately. See link_map_audit_state in
- <ldsodefs.h>. */
+/* Information used by audit modules. An array of GLRO (naudit)
+ elements follows the l_rw link map data in memory (in some cases
+ conservatively extended to DL_NNS). */
struct auditstate
{
uintptr_t cookie;
unsigned int bindflags;
};
+_Static_assert (__alignof (struct auditstate) <= __alignof (struct link_map_rw),
+ "auditstate alignment compatible with link_map_rw alignment");
/* This is the hidden instance of struct r_debug_extended used by the
@@ -404,8 +404,9 @@ libc {
_thread_db_dtv_slotinfo_map;
_thread_db_dtv_t_counter;
_thread_db_dtv_t_pointer_val;
+ _thread_db_link_map_l_rw;
_thread_db_link_map_l_tls_modid;
- _thread_db_link_map_l_tls_offset;
+ _thread_db_link_map_rw_l_tls_offset;
_thread_db_list_t_next;
_thread_db_list_t_prev;
_thread_db_pthread_cancelhandling;
@@ -38,6 +38,7 @@ typedef struct
} dtv;
typedef struct link_map link_map;
+typedef struct link_map_rw link_map_rw;
typedef struct rtld_global rtld_global;
typedef struct dtv_slotinfo_list dtv_slotinfo_list;
typedef struct dtv_slotinfo dtv_slotinfo;
@@ -93,7 +93,8 @@ DB_STRUCT (pthread_key_data_level2)
DB_STRUCT_ARRAY_FIELD (pthread_key_data_level2, data)
DB_STRUCT_FIELD (link_map, l_tls_modid)
-DB_STRUCT_FIELD (link_map, l_tls_offset)
+DB_STRUCT_FIELD (link_map, l_rw)
+DB_STRUCT_FIELD (link_map_rw, l_tls_offset)
DB_STRUCT_ARRAY_FIELD (dtv, dtv)
#define pointer_val pointer.val /* Field of anonymous struct in dtv_t. */
@@ -191,9 +191,15 @@ td_thr_tlsbase (const td_thrhandle_t *th,
/* Is the DTV current enough? */
if (dtvgen < modgen)
{
- try_static_tls:
- /* If the module uses Static TLS, we're still good. */
- err = DB_GET_FIELD (temp, th->th_ta_p, map, link_map, l_tls_offset, 0);
+ try_static_tls:;
+ /* If the module uses Static TLS, we're still good. Follow the
+ l_rw pointer to l_tls_offset. */
+ psaddr_t l_rw;
+ err = DB_GET_FIELD (l_rw, th->th_ta_p, map, link_map, l_rw, 0);
+ if (err != TD_OK)
+ return err;
+ err = DB_GET_FIELD (temp, th->th_ta_p, l_rw, link_map_rw,
+ l_tls_offset, 0);
if (err != TD_OK)
return err;
ptrdiff_t tlsoff = (uintptr_t)temp;
@@ -133,7 +133,7 @@ __cxa_thread_atexit_impl (dtor_func func, void *obj, void *dso_symbol)
_dl_close_worker is protected by the dl_load_lock. The execution in
__call_tls_dtors does not really depend on this value beyond the fact that
it should be atomic, so Relaxed MO should be sufficient. */
- atomic_fetch_add_relaxed (&lm_cache->l_tls_dtor_count, 1);
+ atomic_fetch_add_relaxed (&lm_cache->l_rw->l_tls_dtor_count, 1);
__rtld_lock_unlock_recursive (GL(dl_load_lock));
new->map = lm_cache;
@@ -159,7 +159,7 @@ __call_tls_dtors (void)
l_tls_dtor_count decrement. That way, we protect this access from a
potential DSO unload in _dl_close_worker, which happens when
l_tls_dtor_count is 0. See CONCURRENCY NOTES for more detail. */
- atomic_fetch_add_release (&cur->map->l_tls_dtor_count, -1);
+ atomic_fetch_add_release (&cur->map->l_rw->l_tls_dtor_count, -1);
free (cur);
}
}
@@ -249,7 +249,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
else
# endif
{
- td->arg = (void*)(sym->st_value + sym_map->l_tls_offset
+ td->arg = (void*)(sym->st_value
+ + sym_map->l_rw->l_tls_offset
+ reloc->r_addend);
td->entry = _dl_tlsdesc_return;
}
@@ -274,7 +275,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
{
CHECK_STATIC_TLS (map, sym_map);
*reloc_addr =
- sym->st_value + reloc->r_addend + sym_map->l_tls_offset;
+ sym->st_value + reloc->r_addend + sym_map->l_rw->l_tls_offset;
}
break;
@@ -401,12 +401,12 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
else if (r_type == R_ALPHA_TPREL64)
{
# ifdef RTLD_BOOTSTRAP
- *reloc_addr = sym_raw_value + map->l_tls_offset;
+ *reloc_addr = sym_raw_value + map->l_rw->l_tls_offset;
# else
if (sym_map)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym_raw_value + sym_map->l_tls_offset;
+ *reloc_addr = sym_raw_value + sym_map->l_rw->l_tls_offset;
}
# endif
}
@@ -284,7 +284,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym_map->l_tls_offset + sym->st_value + reloc->r_addend;
+ *reloc_addr = (sym_map->l_rw->l_tls_offset + sym->st_value
+ + reloc->r_addend);
}
break;
@@ -394,7 +394,7 @@ elf_machine_rel (struct link_map *map, struct r_scope_elem *scope[],
# endif
# endif
{
- td->argument.value = value + sym_map->l_tls_offset;
+ td->argument.value = value + sym_map->l_rw->l_tls_offset;
td->entry = _dl_tlsdesc_return;
}
}
@@ -424,7 +424,7 @@ elf_machine_rel (struct link_map *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr += sym->st_value + sym_map->l_tls_offset;
+ *reloc_addr += sym->st_value + sym_map->l_rw->l_tls_offset;
}
break;
case R_ARM_IRELATIVE:
@@ -302,7 +302,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = (sym->st_value + sym_map->l_tls_offset
+ *reloc_addr = (sym->st_value + sym_map->l_rw->l_tls_offset
+ reloc->r_addend);
}
break;
@@ -1342,15 +1342,9 @@ is_rtld_link_map (const struct link_map *l)
static inline struct auditstate *
link_map_audit_state (struct link_map *l, size_t index)
{
- if (is_rtld_link_map (l))
- /* The auditstate array is stored separately. */
- return _dl_rtld_auditstate + index;
- else
- {
- /* The auditstate array follows the link map in memory. */
- struct auditstate *base = (struct auditstate *) (l + 1);
- return &base[index];
- }
+ /* The auditstate array follows the read-write link map part in memory. */
+ struct auditstate *base = (struct auditstate *) (l->l_rw + 1);
+ return &base[index];
}
/* Call the la_objsearch from the audit modules from the link map L. If
@@ -715,7 +715,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- value = sym_map->l_tls_offset + sym->st_value + reloc->r_addend;
+ value = (sym_map->l_rw->l_tls_offset + sym->st_value
+ + reloc->r_addend);
}
break;
#endif /* use TLS */
@@ -353,7 +353,8 @@ and creates an unsatisfiable circular dependency.\n",
# endif
# endif
{
- td->arg = (void*)(sym->st_value - sym_map->l_tls_offset
+ td->arg = (void*)(sym->st_value
+ - sym_map->l_rw->l_tls_offset
+ (ElfW(Word))td->arg);
td->entry = _dl_tlsdesc_return;
}
@@ -363,7 +364,7 @@ and creates an unsatisfiable circular dependency.\n",
case R_386_TLS_TPOFF32:
/* The offset is positive, backward from the thread pointer. */
# ifdef RTLD_BOOTSTRAP
- *reloc_addr += map->l_tls_offset - sym->st_value;
+ *reloc_addr += map->l_rw->l_tls_offset - sym->st_value;
# else
/* We know the offset of object the symbol is contained in.
It is a positive value which will be subtracted from the
@@ -372,14 +373,14 @@ and creates an unsatisfiable circular dependency.\n",
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr += sym_map->l_tls_offset - sym->st_value;
+ *reloc_addr += sym_map->l_rw->l_tls_offset - sym->st_value;
}
# endif
break;
case R_386_TLS_TPOFF:
/* The offset is negative, forward from the thread pointer. */
# ifdef RTLD_BOOTSTRAP
- *reloc_addr += sym->st_value - map->l_tls_offset;
+ *reloc_addr += sym->st_value - map->l_rw->l_tls_offset;
# else
/* We know the offset of object the symbol is contained in.
It is a negative value which will be added to the
@@ -387,7 +388,7 @@ and creates an unsatisfiable circular dependency.\n",
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr += sym->st_value - sym_map->l_tls_offset;
+ *reloc_addr += sym->st_value - sym_map->l_rw->l_tls_offset;
}
# endif
break;
@@ -37,7 +37,7 @@ extern void *__tls_get_addr (tls_index *ti);
/* Compute the value for a GOTTPREL reloc. */
#define TLS_TPREL_VALUE(sym_map, sym) \
- ((sym_map)->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
+ ((sym_map)->l_rw->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
/* Compute the value for a DTPREL reloc. */
#define TLS_DTPREL_VALUE(sym) ((sym)->st_value - TLS_DTV_OFFSET)
@@ -35,7 +35,7 @@ typedef struct
/* Compute the value for a TPREL reloc. */
#define TLS_TPREL_VALUE(sym_map, sym, reloc) \
- ((sym_map)->l_tls_offset + (sym)->st_value + (reloc)->r_addend \
+ ((sym_map)->l_rw->l_tls_offset + (sym)->st_value + (reloc)->r_addend \
- TLS_TP_OFFSET)
/* Compute the value for a DTPREL reloc. */
@@ -262,7 +262,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym->st_value + sym_map->l_tls_offset + reloc->r_addend;
+ *reloc_addr = (sym->st_value + sym_map->l_rw->l_tls_offset
+ + reloc->r_addend);
}
}
#endif
@@ -35,7 +35,7 @@ typedef struct
/* Compute the value for a GOTTPREL reloc. */
#define TLS_TPREL_VALUE(sym_map, sym) \
- ((sym_map)->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
+ ((sym_map)->l_rw->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
/* Compute the value for a DTPREL reloc. */
#define TLS_DTPREL_VALUE(sym) \
@@ -250,13 +250,13 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
case R_OR1K_TLS_TPOFF:
# ifdef RTLD_BOOTSTRAP
*reloc_addr = sym->st_value + reloc->r_addend +
- map->l_tls_offset - TLS_TCB_SIZE;
+ map->l_rw->l_tls_offset - TLS_TCB_SIZE;
# else
if (sym_map != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
*reloc_addr = sym->st_value + reloc->r_addend +
- sym_map->l_tls_offset - TLS_TCB_SIZE;
+ sym_map->l_rw->l_tls_offset - TLS_TCB_SIZE;
}
# endif
break;
@@ -35,7 +35,7 @@ typedef struct
/* Compute the value for a @tprel reloc. */
#define TLS_TPREL_VALUE(sym_map, sym, reloc) \
- ((sym_map)->l_tls_offset + (sym)->st_value + (reloc)->r_addend \
+ ((sym_map)->l_rw->l_tls_offset + (sym)->st_value + (reloc)->r_addend \
- TLS_TP_OFFSET)
/* Compute the value for a @dtprel reloc. */
@@ -354,7 +354,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
if (!NOT_BOOTSTRAP)
{
reloc_addr[0] = 0;
- reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
+ reloc_addr[1] = (sym_map->l_rw->l_tls_offset - TLS_TP_OFFSET
+ TLS_DTV_OFFSET);
break;
}
@@ -368,7 +368,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
{
reloc_addr[0] = 0;
/* Set up for local dynamic. */
- reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
+ reloc_addr[1] = (sym_map->l_rw->l_tls_offset - TLS_TP_OFFSET
+ TLS_DTV_OFFSET);
break;
}
@@ -748,7 +748,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
{
#ifdef RTLD_BOOTSTRAP
reloc_addr[0] = 0;
- reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
+ reloc_addr[1] = (sym_map->l_rw->l_tls_offset - TLS_TP_OFFSET
+ TLS_DTV_OFFSET);
return;
#else
@@ -762,7 +762,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
{
reloc_addr[0] = 0;
/* Set up for local dynamic. */
- reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
+ reloc_addr[1] = (sym_map->l_rw->l_tls_offset - TLS_TP_OFFSET
+ TLS_DTV_OFFSET);
return;
}
@@ -35,7 +35,7 @@ typedef struct
/* Compute the value for a GOTTPREL reloc. */
#define TLS_TPREL_VALUE(sym_map, sym) \
- ((sym_map)->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
+ ((sym_map)->l_rw->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
/* Compute the value for a DTPREL reloc. */
#define TLS_DTPREL_VALUE(sym) \
@@ -339,7 +339,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
case R_390_TLS_TPOFF:
/* The offset is negative, forward from the thread pointer. */
#ifdef RTLD_BOOTSTRAP
- *reloc_addr = sym->st_value + reloc->r_addend - map->l_tls_offset;
+ *reloc_addr = (sym->st_value + reloc->r_addend
+ - map->l_rw->l_tls_offset);
#else
/* We know the offset of the object the symbol is contained in.
It is a negative value which will be added to the
@@ -348,7 +349,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
{
CHECK_STATIC_TLS (map, sym_map);
*reloc_addr = (sym->st_value + reloc->r_addend
- - sym_map->l_tls_offset);
+ - sym_map->l_rw->l_tls_offset);
}
#endif
break;
@@ -321,7 +321,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
case R_390_TLS_TPOFF:
/* The offset is negative, forward from the thread pointer. */
#ifdef RTLD_BOOTSTRAP
- *reloc_addr = sym->st_value + reloc->r_addend - map->l_tls_offset;
+ *reloc_addr = (sym->st_value + reloc->r_addend
+ - map->l_rw->l_tls_offset);
#else
/* We know the offset of the object the symbol is contained in.
It is a negative value which will be added to the
@@ -330,7 +331,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
{
CHECK_STATIC_TLS (map, sym_map);
*reloc_addr = (sym->st_value + reloc->r_addend
- - sym_map->l_tls_offset);
+ - sym_map->l_rw->l_tls_offset);
}
#endif
break;
@@ -363,7 +363,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
case R_SH_TLS_TPOFF32:
/* The offset is positive, afterward from the thread pointer. */
#ifdef RTLD_BOOTSTRAP
- *reloc_addr = map->l_tls_offset + sym->st_value + reloc->r_addend;
+ *reloc_addr = (map->l_rw->l_tls_offset + sym->st_value
+ + reloc->r_addend);
#else
/* We know the offset of object the symbol is contained in.
It is a positive value which will be added to the thread
@@ -372,8 +373,8 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym_map->l_tls_offset + sym->st_value
- + reloc->r_addend;
+ *reloc_addr = (sym_map->l_rw->l_tls_offset + sym->st_value
+ + reloc->r_addend);
}
#endif
break;
@@ -371,7 +371,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym->st_value - sym_map->l_tls_offset
+ *reloc_addr = sym->st_value - sym_map->l_rw->l_tls_offset
+ reloc->r_addend;
}
break;
@@ -381,7 +381,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- value = sym->st_value - sym_map->l_tls_offset
+ value = sym->st_value - sym_map->l_rw->l_tls_offset
+ reloc->r_addend;
if (r_type == R_SPARC_TLS_LE_HIX22)
*reloc_addr = (*reloc_addr & 0xffc00000) | ((~value) >> 10);
@@ -387,7 +387,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym->st_value - sym_map->l_tls_offset
+ *reloc_addr = sym->st_value - sym_map->l_rw->l_tls_offset
+ reloc->r_addend;
}
break;
@@ -397,7 +397,7 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- value = sym->st_value - sym_map->l_tls_offset
+ value = sym->st_value - sym_map->l_rw->l_tls_offset
+ reloc->r_addend;
if (r_type == R_SPARC_TLS_LE_HIX22)
*(unsigned int *)reloc_addr =
@@ -40,7 +40,7 @@ dl_isa_level_check (struct link_map *m, const char *program)
l = m->l_initfini[i];
/* Skip ISA level check if functions have been executed. */
- if (l->l_init_called)
+ if (l->l_rw->l_init_called)
continue;
#ifdef SHARED
@@ -383,7 +383,8 @@ and creates an unsatisfiable circular dependency.\n",
else
# endif
{
- td->arg = (void*)(sym->st_value - sym_map->l_tls_offset
+ td->arg = (void*)(sym->st_value
+ - sym_map->l_rw->l_tls_offset
+ reloc->r_addend);
td->entry = _dl_tlsdesc_return;
}
@@ -399,7 +400,7 @@ and creates an unsatisfiable circular dependency.\n",
It is a negative value which will be added to the
thread pointer. */
value = (sym->st_value + reloc->r_addend
- - sym_map->l_tls_offset);
+ - sym_map->l_rw->l_tls_offset);
# ifdef __ILP32__
/* The symbol and addend values are 32 bits but the GOT
entry is 64 bits wide and the whole 64-bit entry is used