@@ -174,10 +174,10 @@ __libc_setup_tls (void)
#if TLS_TCB_AT_TP
_dl_static_dtv[2].pointer.val = ((char *) tlsblock + tcb_offset
- roundup (memsz, align ?: 1));
- main_map->l_tls_offset = roundup (memsz, align ?: 1);
+ main_map->l_rw->l_tls_offset = roundup (memsz, align ?: 1);
#elif TLS_DTV_AT_TP
_dl_static_dtv[2].pointer.val = (char *) tlsblock + tcb_offset;
- main_map->l_tls_offset = tcb_offset;
+ main_map->l_rw->l_tls_offset = tcb_offset;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
@@ -513,8 +513,8 @@ _dl_close_worker (struct link_map_private *map, bool force)
atomic_store_relaxed (&GL(dl_tls_max_dtv_idx),
GL(dl_tls_static_nelem));
- if (imap->l_tls_offset != NO_TLS_OFFSET
- && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
+ if (imap->l_rw->l_tls_offset != NO_TLS_OFFSET
+ && imap->l_rw->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
{
/* Collect a contiguous chunk built from the objects in
this search list, going in either direction. When the
@@ -522,19 +522,19 @@ _dl_close_worker (struct link_map_private *map, bool force)
reclaim it. */
#if TLS_TCB_AT_TP
if (tls_free_start == NO_TLS_OFFSET
- || (size_t) imap->l_tls_offset == tls_free_start)
+ || (size_t) imap->l_rw->l_tls_offset == tls_free_start)
{
/* Extend the contiguous chunk being reclaimed. */
tls_free_start
- = imap->l_tls_offset - imap->l_tls_blocksize;
+ = imap->l_rw->l_tls_offset - imap->l_tls_blocksize;
if (tls_free_end == NO_TLS_OFFSET)
- tls_free_end = imap->l_tls_offset;
+ tls_free_end = imap->l_rw->l_tls_offset;
}
- else if (imap->l_tls_offset - imap->l_tls_blocksize
+ else if (imap->l_rw->l_tls_offset - imap->l_tls_blocksize
== tls_free_end)
/* Extend the chunk backwards. */
- tls_free_end = imap->l_tls_offset;
+ tls_free_end = imap->l_rw->l_tls_offset;
else
{
/* This isn't contiguous with the last chunk freed.
@@ -543,19 +543,20 @@ _dl_close_worker (struct link_map_private *map, bool force)
if (tls_free_end == GL(dl_tls_static_used))
{
GL(dl_tls_static_used) = tls_free_start;
- tls_free_end = imap->l_tls_offset;
+ tls_free_end = imap->l_rw->l_tls_offset;
tls_free_start
= tls_free_end - imap->l_tls_blocksize;
}
- else if ((size_t) imap->l_tls_offset
+ else if ((size_t) imap->l_rw->l_tls_offset
== GL(dl_tls_static_used))
GL(dl_tls_static_used)
- = imap->l_tls_offset - imap->l_tls_blocksize;
- else if (tls_free_end < (size_t) imap->l_tls_offset)
+ = imap->l_rw->l_tls_offset - imap->l_tls_blocksize;
+ else if (tls_free_end
+ < (size_t) imap->l_rw->l_tls_offset)
{
/* We pick the later block. It has a chance to
be freed. */
- tls_free_end = imap->l_tls_offset;
+ tls_free_end = imap->l_rw->l_tls_offset;
tls_free_start
= tls_free_end - imap->l_tls_blocksize;
}
@@ -564,34 +565,37 @@ _dl_close_worker (struct link_map_private *map, bool force)
if (tls_free_start == NO_TLS_OFFSET)
{
tls_free_start = imap->l_tls_firstbyte_offset;
- tls_free_end = (imap->l_tls_offset
+ tls_free_end = (imap->l_rw->l_tls_offset
+ imap->l_tls_blocksize);
}
else if (imap->l_tls_firstbyte_offset == tls_free_end)
/* Extend the contiguous chunk being reclaimed. */
- tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
- else if (imap->l_tls_offset + imap->l_tls_blocksize
+ tls_free_end = (imap->l_rw->l_tls_offset
+ + imap->l_tls_blocksize);
+ else if (imap->l_rw->l_tls_offset + imap->l_tls_blocksize
== tls_free_start)
/* Extend the chunk backwards. */
tls_free_start = imap->l_tls_firstbyte_offset;
/* This isn't contiguous with the last chunk freed.
One of them will be leaked unless we can free
one block right away. */
- else if (imap->l_tls_offset + imap->l_tls_blocksize
+ else if (imap->l_rw->l_tls_offset + imap->l_tls_blocksize
== GL(dl_tls_static_used))
GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
else if (tls_free_end == GL(dl_tls_static_used))
{
GL(dl_tls_static_used) = tls_free_start;
tls_free_start = imap->l_tls_firstbyte_offset;
- tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
+ tls_free_end = (imap->l_rw->l_tls_offset
+ + imap->l_tls_blocksize);
}
else if (tls_free_end < imap->l_tls_firstbyte_offset)
{
/* We pick the later block. It has a chance to
be freed. */
tls_free_start = imap->l_tls_firstbyte_offset;
- tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
+ tls_free_end = (imap->l_rw->l_tls_offset
+ + imap->l_tls_blocksize);
}
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
@@ -137,7 +137,7 @@ _dl_new_object (char *realname, const char *libname, int type,
new->l_used = 1;
new->l_loader = loader;
#if NO_TLS_OFFSET != 0
- new->l_tls_offset = NO_TLS_OFFSET;
+ new->l_rw->l_tls_offset = NO_TLS_OFFSET;
#endif
new->l_ns = nsid;
@@ -41,7 +41,7 @@
dynamically loaded. This can only work if there is enough surplus in
the static TLS area already allocated for each running thread. If this
object's TLS segment is too big to fit, we fail with -1. If it fits,
- we set MAP->l_tls_offset and return 0.
+ we set MAP->l_rw->l_tls_offset and return 0.
A portion of the surplus static TLS can be optionally used to optimize
dynamic TLS access (with TLSDESC or powerpc TLS optimizations).
If OPTIONAL is true then TLS is allocated for such optimization and
@@ -53,7 +53,7 @@ _dl_try_allocate_static_tls (struct link_map_private *map, bool optional)
{
/* If we've already used the variable with dynamic access, or if the
alignment requirements are too high, fail. */
- if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
+ if (map->l_rw->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
|| map->l_tls_align > GLRO (dl_tls_static_align))
{
fail:
@@ -81,7 +81,7 @@ _dl_try_allocate_static_tls (struct link_map_private *map, bool optional)
size_t offset = GL(dl_tls_static_used) + use;
- map->l_tls_offset = GL(dl_tls_static_used) = offset;
+ map->l_rw->l_tls_offset = GL(dl_tls_static_used) = offset;
#elif TLS_DTV_AT_TP
/* dl_tls_static_used includes the TCB at the beginning. */
size_t offset = (ALIGN_UP(GL(dl_tls_static_used)
@@ -100,7 +100,7 @@ _dl_try_allocate_static_tls (struct link_map_private *map, bool optional)
else if (optional)
GL(dl_tls_static_optional) -= use;
- map->l_tls_offset = offset;
+ map->l_rw->l_tls_offset = offset;
map->l_tls_firstbyte_offset = GL(dl_tls_static_used);
GL(dl_tls_static_used) = used;
#else
@@ -134,7 +134,7 @@ void
__attribute_noinline__
_dl_allocate_static_tls (struct link_map_private *map)
{
- if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
+ if (map->l_rw->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
|| _dl_try_allocate_static_tls (map, false))
{
_dl_signal_error (0, map->l_public.l_name, NULL, N_("\
@@ -150,9 +150,10 @@ void
_dl_nothread_init_static_tls (struct link_map_private *map)
{
#if TLS_TCB_AT_TP
- void *dest = (char *) THREAD_SELF - map->l_tls_offset;
+ void *dest = (char *) THREAD_SELF - map->l_rw->l_tls_offset;
#elif TLS_DTV_AT_TP
- void *dest = (char *) THREAD_SELF + map->l_tls_offset + TLS_PRE_TCB_SIZE;
+ void *dest = ((char *) THREAD_SELF + map->l_rw->l_tls_offset
+ + TLS_PRE_TCB_SIZE);
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
@@ -29,8 +29,8 @@
can't be done, we fall back to the error that DF_STATIC_TLS is
intended to produce. */
#define HAVE_STATIC_TLS(map, sym_map) \
- (__builtin_expect ((sym_map)->l_tls_offset != NO_TLS_OFFSET \
- && ((sym_map)->l_tls_offset \
+ (__builtin_expect ((sym_map)->l_rw->l_tls_offset != NO_TLS_OFFSET \
+ && ((sym_map)->l_rw->l_tls_offset \
!= FORCED_DYNAMIC_TLS_OFFSET), 1))
#define CHECK_STATIC_TLS(map, sym_map) \
@@ -40,9 +40,9 @@
} while (0)
#define TRY_STATIC_TLS(map, sym_map) \
- (__builtin_expect ((sym_map)->l_tls_offset \
+ (__builtin_expect ((sym_map)->l_rw->l_tls_offset \
!= FORCED_DYNAMIC_TLS_OFFSET, 1) \
- && (__builtin_expect ((sym_map)->l_tls_offset != NO_TLS_OFFSET, 1) \
+ && (__builtin_expect ((sym_map)->l_rw->l_tls_offset != NO_TLS_OFFSET, 1)\
|| _dl_try_allocate_static_tls (sym_map, true) == 0))
int _dl_try_allocate_static_tls (struct link_map_private *map, bool optional)
@@ -81,7 +81,10 @@ int _dl_bind_not;
static struct link_map_private _dl_main_map =
{
.l_public = { .l_name = (char *) "", },
- .l_rw = &(struct link_map_rw) { },
+ .l_rw = &(struct link_map_rw)
+ {
+ .l_tls_offset = NO_TLS_OFFSET,
+ },
.l_real = &_dl_main_map,
.l_ns = LM_ID_BASE,
.l_libname = &(struct libname_list) { .name = "", .dont_free = 1 },
@@ -101,7 +104,6 @@ static struct link_map_private _dl_main_map =
.l_scope = _dl_main_map.l_scope_mem,
.l_local_scope = { &_dl_main_map.l_searchlist },
.l_used = 1,
- .l_tls_offset = NO_TLS_OFFSET,
.l_serial = 1,
};
@@ -278,7 +278,7 @@ _dl_determine_tlsoffset (void)
/* XXX For some architectures we perhaps should store the
negative offset. */
- slotinfo[cnt].map->l_tls_offset = off;
+ slotinfo[cnt].map->l_rw->l_tls_offset = off;
continue;
}
}
@@ -295,7 +295,7 @@ _dl_determine_tlsoffset (void)
/* XXX For some architectures we perhaps should store the
negative offset. */
- slotinfo[cnt].map->l_tls_offset = off;
+ slotinfo[cnt].map->l_rw->l_tls_offset = off;
}
GL(dl_tls_static_used) = offset;
@@ -322,7 +322,7 @@ _dl_determine_tlsoffset (void)
off += slotinfo[cnt].map->l_tls_align;
if (off + slotinfo[cnt].map->l_tls_blocksize - firstbyte <= freetop)
{
- slotinfo[cnt].map->l_tls_offset = off - firstbyte;
+ slotinfo[cnt].map->l_rw->l_tls_offset = off - firstbyte;
freebottom = (off + slotinfo[cnt].map->l_tls_blocksize
- firstbyte);
continue;
@@ -333,7 +333,7 @@ _dl_determine_tlsoffset (void)
if (off - offset < firstbyte)
off += slotinfo[cnt].map->l_tls_align;
- slotinfo[cnt].map->l_tls_offset = off - firstbyte;
+ slotinfo[cnt].map->l_rw->l_tls_offset = off - firstbyte;
if (off - firstbyte - offset > freetop - freebottom)
{
freebottom = offset;
@@ -573,17 +573,17 @@ _dl_allocate_tls_init (void *result, bool init_tls)
dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
dtv[map->l_tls_modid].pointer.to_free = NULL;
- if (map->l_tls_offset == NO_TLS_OFFSET
- || map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET)
+ if (map->l_rw->l_tls_offset == NO_TLS_OFFSET
+ || map->l_rw->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET)
continue;
assert (map->l_tls_modid == total + cnt);
assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
#if TLS_TCB_AT_TP
- assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
- dest = (char *) result - map->l_tls_offset;
+ assert ((size_t) map->l_rw->l_tls_offset >= map->l_tls_blocksize);
+ dest = (char *) result - map->l_rw->l_tls_offset;
#elif TLS_DTV_AT_TP
- dest = (char *) result + map->l_tls_offset;
+ dest = (char *) result + map->l_rw->l_tls_offset;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
@@ -872,22 +872,23 @@ tls_get_addr_tail (GET_ADDR_ARGS, dtv_t *dtv, struct link_map_private *the_map)
variable into static storage, we'll wait until the address in the
static TLS block is set up, and use that. If we're undecided
yet, make sure we make the decision holding the lock as well. */
- if (__glibc_unlikely (the_map->l_tls_offset
+ if (__glibc_unlikely (the_map->l_rw->l_tls_offset
!= FORCED_DYNAMIC_TLS_OFFSET))
{
__rtld_lock_lock_recursive (GL(dl_load_tls_lock));
- if (__glibc_likely (the_map->l_tls_offset == NO_TLS_OFFSET))
+ if (__glibc_likely (the_map->l_rw->l_tls_offset == NO_TLS_OFFSET))
{
- the_map->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
+ the_map->l_rw->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
__rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
}
- else if (__glibc_likely (the_map->l_tls_offset
+ else if (__glibc_likely (the_map->l_rw->l_tls_offset
!= FORCED_DYNAMIC_TLS_OFFSET))
{
#if TLS_TCB_AT_TP
- void *p = (char *) THREAD_SELF - the_map->l_tls_offset;
+ void *p = (char *) THREAD_SELF - the_map->l_rw->l_tls_offset;
#elif TLS_DTV_AT_TP
- void *p = (char *) THREAD_SELF + the_map->l_tls_offset + TLS_PRE_TCB_SIZE;
+ void *p = ((char *) THREAD_SELF + the_map->l_rw->l_tls_offset
+ + TLS_PRE_TCB_SIZE);
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
@@ -1076,9 +1077,9 @@ static inline void __attribute__((always_inline))
init_one_static_tls (struct pthread *curp, struct link_map_private *map)
{
# if TLS_TCB_AT_TP
- void *dest = (char *) curp - map->l_tls_offset;
+ void *dest = (char *) curp - map->l_rw->l_tls_offset;
# elif TLS_DTV_AT_TP
- void *dest = (char *) curp + map->l_tls_offset + TLS_PRE_TCB_SIZE;
+ void *dest = (char *) curp + map->l_rw->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
@@ -460,6 +460,9 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
static struct link_map_rw rtld_map_rw;
GL (dl_rtld_map).l_rw = &rtld_map_rw;
+#if NO_TLS_OFFSET != 0
+ GL (dl_rtld_map).l_rw->l_tls_offset = NO_TLS_OFFSET;
+#endif
/* If it hasn't happen yet record the startup time. */
rtld_timer_start (&start_time);
@@ -481,12 +484,6 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
GL(dl_rtld_map).l_real = &GL(dl_rtld_map);
GL(dl_rtld_map).l_map_start = (ElfW(Addr)) &__ehdr_start;
GL(dl_rtld_map).l_map_end = (ElfW(Addr)) _end;
- /* Copy the TLS related data if necessary. */
-#ifndef DONT_USE_BOOTSTRAP_MAP
-# if NO_TLS_OFFSET != 0
- GL(dl_rtld_map).l_tls_offset = NO_TLS_OFFSET;
-# endif
-#endif
/* Initialize the stack end variable. */
__libc_stack_end = __builtin_frame_address (0);
@@ -552,10 +549,6 @@ _dl_start (void *arg)
bootstrap_map.l_ld_readonly = DL_RO_DYN_SECTION;
elf_get_dynamic_info (&bootstrap_map, true, false);
-#if NO_TLS_OFFSET != 0
- bootstrap_map.l_tls_offset = NO_TLS_OFFSET;
-#endif
-
#ifdef ELF_MACHINE_BEFORE_RTLD_RELOC
ELF_MACHINE_BEFORE_RTLD_RELOC (&bootstrap_map, bootstrap_map.l_info);
#endif
@@ -217,9 +217,10 @@ __pthread_init_static_tls (struct link_map_private *map)
continue;
# if TLS_TCB_AT_TP
- void *dest = (char *) t->tcb - map->l_tls_offset;
+ void *dest = (char *) t->tcb - map->l_rw->l_tls_offset;
# elif TLS_DTV_AT_TP
- void *dest = (char *) t->tcb + map->l_tls_offset + TLS_PRE_TCB_SIZE;
+ void *dest = ((char *) t->tcb + map->l_rw->l_tls_offset
+ + TLS_PRE_TCB_SIZE);
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
@@ -91,6 +91,28 @@ struct link_map_rw
accessor function below. */
unsigned int l_direct_opencount;
+ /* For objects present at startup time: offset in the static TLS
+ block. For loaded objects, it can be NO_TLS_OFFSET (not yet
+ initialized), FORCED_DYNAMIC_TLS_OFFSET (if fully dynamic TLS is
+ used), or an actual TLS offset (if the static TLS allocation has
+ been re-used to satisfy dynamic TLS needs).
+
+ This field is written outside the general loader lock, so it has
+    to reside in the read-write portion of the link map.  */
+#ifndef NO_TLS_OFFSET
+# define NO_TLS_OFFSET 0
+#endif
+#ifndef FORCED_DYNAMIC_TLS_OFFSET
+# if NO_TLS_OFFSET == 0
+# define FORCED_DYNAMIC_TLS_OFFSET -1
+# elif NO_TLS_OFFSET == -1
+# define FORCED_DYNAMIC_TLS_OFFSET -2
+# else
+# error "FORCED_DYNAMIC_TLS_OFFSET is not defined"
+# endif
+#endif
+ ptrdiff_t l_tls_offset;
+
/* Number of thread_local objects constructed by this DSO. This is
atomically accessed and modified and is not always protected by the load
lock. See also: CONCURRENCY NOTES in cxa_thread_atexit_impl.c. */
@@ -322,20 +344,6 @@ struct link_map_private
size_t l_tls_align;
/* Offset of first byte module alignment. */
size_t l_tls_firstbyte_offset;
-#ifndef NO_TLS_OFFSET
-# define NO_TLS_OFFSET 0
-#endif
-#ifndef FORCED_DYNAMIC_TLS_OFFSET
-# if NO_TLS_OFFSET == 0
-# define FORCED_DYNAMIC_TLS_OFFSET -1
-# elif NO_TLS_OFFSET == -1
-# define FORCED_DYNAMIC_TLS_OFFSET -2
-# else
-# error "FORCED_DYNAMIC_TLS_OFFSET is not defined"
-# endif
-#endif
- /* For objects present at startup time: offset in the static TLS block. */
- ptrdiff_t l_tls_offset;
/* Index of the module in the dtv array. */
size_t l_tls_modid;
@@ -404,8 +404,9 @@ libc {
_thread_db_dtv_slotinfo_map;
_thread_db_dtv_t_counter;
_thread_db_dtv_t_pointer_val;
+ _thread_db_link_map_l_rw;
_thread_db_link_map_l_tls_modid;
- _thread_db_link_map_l_tls_offset;
+ _thread_db_link_map_rw_l_tls_offset;
_thread_db_list_t_next;
_thread_db_list_t_prev;
_thread_db_pthread_cancelhandling;
@@ -38,6 +38,7 @@ typedef struct
} dtv;
typedef struct link_map_private link_map;
+typedef struct link_map_rw link_map_rw;
typedef struct rtld_global rtld_global;
typedef struct dtv_slotinfo_list dtv_slotinfo_list;
typedef struct dtv_slotinfo dtv_slotinfo;
@@ -93,7 +93,8 @@ DB_STRUCT (pthread_key_data_level2)
DB_STRUCT_ARRAY_FIELD (pthread_key_data_level2, data)
DB_STRUCT_FIELD (link_map, l_tls_modid)
-DB_STRUCT_FIELD (link_map, l_tls_offset)
+DB_STRUCT_FIELD (link_map, l_rw)
+DB_STRUCT_FIELD (link_map_rw, l_tls_offset)
DB_STRUCT_ARRAY_FIELD (dtv, dtv)
#define pointer_val pointer.val /* Field of anonymous struct in dtv_t. */
@@ -191,9 +191,15 @@ td_thr_tlsbase (const td_thrhandle_t *th,
/* Is the DTV current enough? */
if (dtvgen < modgen)
{
- try_static_tls:
- /* If the module uses Static TLS, we're still good. */
- err = DB_GET_FIELD (temp, th->th_ta_p, map, link_map, l_tls_offset, 0);
+ try_static_tls:;
+ /* If the module uses Static TLS, we're still good. Follow the
+ l_rw pointer to l_tls_offset. */
+ psaddr_t l_rw;
+ err = DB_GET_FIELD (l_rw, th->th_ta_p, map, link_map, l_rw, 0);
+ if (err != TD_OK)
+ return err;
+ err = DB_GET_FIELD (temp, th->th_ta_p, l_rw, link_map_rw,
+ l_tls_offset, 0);
if (err != TD_OK)
return err;
ptrdiff_t tlsoff = (uintptr_t)temp;
@@ -250,7 +250,8 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
else
# endif
{
- td->arg = (void*)(sym->st_value + sym_map->l_tls_offset
+ td->arg = (void*)(sym->st_value
+ + sym_map->l_rw->l_tls_offset
+ reloc->r_addend);
td->entry = _dl_tlsdesc_return;
}
@@ -275,7 +276,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
{
CHECK_STATIC_TLS (map, sym_map);
*reloc_addr =
- sym->st_value + reloc->r_addend + sym_map->l_tls_offset;
+ sym->st_value + reloc->r_addend + sym_map->l_rw->l_tls_offset;
}
break;
@@ -402,12 +402,12 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
else if (r_type == R_ALPHA_TPREL64)
{
# ifdef RTLD_BOOTSTRAP
- *reloc_addr = sym_raw_value + map->l_tls_offset;
+ *reloc_addr = sym_raw_value + map->l_rw->l_tls_offset;
# else
if (sym_map)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym_raw_value + sym_map->l_tls_offset;
+ *reloc_addr = sym_raw_value + sym_map->l_rw->l_tls_offset;
}
# endif
}
@@ -285,7 +285,8 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym_map->l_tls_offset + sym->st_value + reloc->r_addend;
+ *reloc_addr = (sym_map->l_rw->l_tls_offset + sym->st_value
+ + reloc->r_addend);
}
break;
@@ -406,7 +406,7 @@ elf_machine_rel (struct link_map_private *map, struct r_scope_elem *scope[],
# endif
# endif
{
- td->argument.value = value + sym_map->l_tls_offset;
+ td->argument.value = value + sym_map->l_rw->l_tls_offset;
td->entry = _dl_tlsdesc_return;
}
}
@@ -436,7 +436,7 @@ elf_machine_rel (struct link_map_private *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr += sym->st_value + sym_map->l_tls_offset;
+ *reloc_addr += sym->st_value + sym_map->l_rw->l_tls_offset;
}
break;
case R_ARM_IRELATIVE:
@@ -303,7 +303,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = (sym->st_value + sym_map->l_tls_offset
+ *reloc_addr = (sym->st_value + sym_map->l_rw->l_tls_offset
+ reloc->r_addend);
}
break;
@@ -716,7 +716,8 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- value = sym_map->l_tls_offset + sym->st_value + reloc->r_addend;
+ value = (sym_map->l_rw->l_tls_offset + sym->st_value
+ + reloc->r_addend);
}
break;
#endif /* use TLS */
@@ -361,7 +361,8 @@ and creates an unsatisfiable circular dependency.\n",
# endif
# endif
{
- td->arg = (void*)(sym->st_value - sym_map->l_tls_offset
+ td->arg = (void*)(sym->st_value
+ - sym_map->l_rw->l_tls_offset
+ (ElfW(Word))td->arg);
td->entry = _dl_tlsdesc_return;
}
@@ -371,7 +372,7 @@ and creates an unsatisfiable circular dependency.\n",
case R_386_TLS_TPOFF32:
/* The offset is positive, backward from the thread pointer. */
# ifdef RTLD_BOOTSTRAP
- *reloc_addr += map->l_tls_offset - sym->st_value;
+ *reloc_addr += map->l_rw->l_tls_offset - sym->st_value;
# else
/* We know the offset of object the symbol is contained in.
It is a positive value which will be subtracted from the
@@ -380,14 +381,14 @@ and creates an unsatisfiable circular dependency.\n",
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr += sym_map->l_tls_offset - sym->st_value;
+ *reloc_addr += sym_map->l_rw->l_tls_offset - sym->st_value;
}
# endif
break;
case R_386_TLS_TPOFF:
/* The offset is negative, forward from the thread pointer. */
# ifdef RTLD_BOOTSTRAP
- *reloc_addr += sym->st_value - map->l_tls_offset;
+ *reloc_addr += sym->st_value - map->l_rw->l_tls_offset;
# else
/* We know the offset of object the symbol is contained in.
It is a negative value which will be added to the
@@ -395,7 +396,7 @@ and creates an unsatisfiable circular dependency.\n",
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr += sym->st_value - sym_map->l_tls_offset;
+ *reloc_addr += sym->st_value - sym_map->l_rw->l_tls_offset;
}
# endif
break;
@@ -399,7 +399,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
#ifndef RTLD_BOOTSTRAP
CHECK_STATIC_TLS (map, sym_map);
#endif
- value += sym_map->l_tls_offset - sym_map->l_public.l_addr;
+ value += sym_map->l_rw->l_tls_offset - sym_map->l_public.l_addr;
}
else
_dl_reloc_bad_type (map, r_type, 0);
@@ -32,7 +32,7 @@ typedef struct
/* Compute the value for a GOTTPREL reloc. */
#define TLS_TPREL_VALUE(sym_map, sym) \
- ((sym_map)->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
+ ((sym_map)->l_rw->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
/* Compute the value for a DTPREL reloc. */
#define TLS_DTPREL_VALUE(sym) ((sym)->st_value - TLS_DTV_OFFSET)
@@ -33,7 +33,7 @@ typedef struct
/* Compute the value for a TPREL reloc. */
#define TLS_TPREL_VALUE(sym_map, sym, reloc) \
- ((sym_map)->l_tls_offset + (sym)->st_value + (reloc)->r_addend \
+ ((sym_map)->l_rw->l_tls_offset + (sym)->st_value + (reloc)->r_addend \
- TLS_TP_OFFSET)
/* Compute the value for a DTPREL reloc. */
@@ -263,7 +263,8 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym->st_value + sym_map->l_tls_offset + reloc->r_addend;
+ *reloc_addr = (sym->st_value + sym_map->l_rw->l_tls_offset
+ + reloc->r_addend);
}
}
#endif
@@ -33,7 +33,7 @@ typedef struct
/* Compute the value for a GOTTPREL reloc. */
#define TLS_TPREL_VALUE(sym_map, sym) \
- ((sym_map)->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
+ ((sym_map)->l_rw->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
/* Compute the value for a DTPREL reloc. */
#define TLS_DTPREL_VALUE(sym) \
@@ -33,7 +33,7 @@ typedef struct
/* Compute the value for a GOTTPREL reloc. */
#define TLS_TPREL_VALUE(sym_map, sym) \
- ((sym_map)->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
+ ((sym_map)->l_rw->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
/* Compute the value for a DTPREL reloc. */
#define TLS_DTPREL_VALUE(sym) \
@@ -251,13 +251,13 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
case R_OR1K_TLS_TPOFF:
# ifdef RTLD_BOOTSTRAP
*reloc_addr = sym->st_value + reloc->r_addend +
- map->l_tls_offset - TLS_TCB_SIZE;
+ map->l_rw->l_tls_offset - TLS_TCB_SIZE;
# else
if (sym_map != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
*reloc_addr = sym->st_value + reloc->r_addend +
- sym_map->l_tls_offset - TLS_TCB_SIZE;
+ sym_map->l_rw->l_tls_offset - TLS_TCB_SIZE;
}
# endif
break;
@@ -35,7 +35,7 @@ typedef struct
/* Compute the value for a @tprel reloc. */
#define TLS_TPREL_VALUE(sym_map, sym, reloc) \
- ((sym_map)->l_tls_offset + (sym)->st_value + (reloc)->r_addend \
+ ((sym_map)->l_rw->l_tls_offset + (sym)->st_value + (reloc)->r_addend \
- TLS_TP_OFFSET)
/* Compute the value for a @dtprel reloc. */
@@ -355,7 +355,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
if (!NOT_BOOTSTRAP)
{
reloc_addr[0] = 0;
- reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
+ reloc_addr[1] = (sym_map->l_rw->l_tls_offset - TLS_TP_OFFSET
+ TLS_DTV_OFFSET);
break;
}
@@ -369,7 +369,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
{
reloc_addr[0] = 0;
/* Set up for local dynamic. */
- reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
+ reloc_addr[1] = (sym_map->l_rw->l_tls_offset - TLS_TP_OFFSET
+ TLS_DTV_OFFSET);
break;
}
@@ -732,7 +732,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
{
#ifdef RTLD_BOOTSTRAP
reloc_addr[0] = 0;
- reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
+ reloc_addr[1] = (sym_map->l_rw->l_tls_offset - TLS_TP_OFFSET
+ TLS_DTV_OFFSET);
return;
#else
@@ -746,7 +746,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
{
reloc_addr[0] = 0;
/* Set up for local dynamic. */
- reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
+ reloc_addr[1] = (sym_map->l_rw->l_tls_offset - TLS_TP_OFFSET
+ TLS_DTV_OFFSET);
return;
}
@@ -33,7 +33,7 @@ typedef struct
/* Compute the value for a GOTTPREL reloc. */
#define TLS_TPREL_VALUE(sym_map, sym) \
- ((sym_map)->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
+ ((sym_map)->l_rw->l_tls_offset + (sym)->st_value - TLS_TP_OFFSET)
/* Compute the value for a DTPREL reloc. */
#define TLS_DTPREL_VALUE(sym) \
@@ -357,7 +357,8 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
case R_390_TLS_TPOFF:
/* The offset is negative, forward from the thread pointer. */
#ifdef RTLD_BOOTSTRAP
- *reloc_addr = sym->st_value + reloc->r_addend - map->l_tls_offset;
+ *reloc_addr = (sym->st_value + reloc->r_addend
+ - map->l_rw->l_tls_offset);
#else
/* We know the offset of the object the symbol is contained in.
It is a negative value which will be added to the
@@ -366,7 +367,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
{
CHECK_STATIC_TLS (map, sym_map);
*reloc_addr = (sym->st_value + reloc->r_addend
- - sym_map->l_tls_offset);
+ - sym_map->l_rw->l_tls_offset);
}
#endif
break;
@@ -333,7 +333,8 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
case R_390_TLS_TPOFF:
/* The offset is negative, forward from the thread pointer. */
#ifdef RTLD_BOOTSTRAP
- *reloc_addr = sym->st_value + reloc->r_addend - map->l_tls_offset;
+ *reloc_addr = (sym->st_value + reloc->r_addend
+ - map->l_rw->l_tls_offset);
#else
/* We know the offset of the object the symbol is contained in.
It is a negative value which will be added to the
@@ -342,7 +343,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
{
CHECK_STATIC_TLS (map, sym_map);
*reloc_addr = (sym->st_value + reloc->r_addend
- - sym_map->l_tls_offset);
+ - sym_map->l_rw->l_tls_offset);
}
#endif
break;
@@ -364,7 +364,8 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
case R_SH_TLS_TPOFF32:
/* The offset is positive, afterward from the thread pointer. */
#ifdef RTLD_BOOTSTRAP
- *reloc_addr = map->l_tls_offset + sym->st_value + reloc->r_addend;
+ *reloc_addr = (map->l_rw->l_tls_offset + sym->st_value
+ + reloc->r_addend);
#else
/* We know the offset of object the symbol is contained in.
It is a positive value which will be added to the thread
@@ -373,8 +374,8 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym_map->l_tls_offset + sym->st_value
- + reloc->r_addend;
+ *reloc_addr = (sym_map->l_rw->l_tls_offset + sym->st_value
+ + reloc->r_addend);
}
#endif
break;
@@ -378,7 +378,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym->st_value - sym_map->l_tls_offset
+ *reloc_addr = sym->st_value - sym_map->l_rw->l_tls_offset
+ reloc->r_addend;
}
break;
@@ -388,7 +388,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- value = sym->st_value - sym_map->l_tls_offset
+ value = sym->st_value - sym_map->l_rw->l_tls_offset
+ reloc->r_addend;
if (r_type == R_SPARC_TLS_LE_HIX22)
*reloc_addr = (*reloc_addr & 0xffc00000) | ((~value) >> 10);
@@ -387,7 +387,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- *reloc_addr = sym->st_value - sym_map->l_tls_offset
+ *reloc_addr = sym->st_value - sym_map->l_rw->l_tls_offset
+ reloc->r_addend;
}
break;
@@ -397,7 +397,7 @@ elf_machine_rela (struct link_map_private *map, struct r_scope_elem *scope[],
if (sym != NULL)
{
CHECK_STATIC_TLS (map, sym_map);
- value = sym->st_value - sym_map->l_tls_offset
+ value = sym->st_value - sym_map->l_rw->l_tls_offset
+ reloc->r_addend;
if (r_type == R_SPARC_TLS_LE_HIX22)
*(unsigned int *)reloc_addr =
@@ -375,7 +375,8 @@ and creates an unsatisfiable circular dependency.\n",
else
# endif
{
- td->arg = (void*)(sym->st_value - sym_map->l_tls_offset
+ td->arg = (void*)(sym->st_value
+ - sym_map->l_rw->l_tls_offset
+ reloc->r_addend);
td->entry = _dl_tlsdesc_return;
}
@@ -391,7 +392,7 @@ and creates an unsatisfiable circular dependency.\n",
It is a negative value which will be added to the
thread pointer. */
value = (sym->st_value + reloc->r_addend
- - sym_map->l_tls_offset);
+ - sym_map->l_rw->l_tls_offset);
# ifdef __ILP32__
/* The symbol and addend values are 32 bits but the GOT
entry is 64 bits wide and the whole 64-bit entry is used