@@ -453,14 +453,14 @@ __init_des_r (struct crypt_data * __restrict __data)
efp[comes_from_word][word_value][o_long] |= mask2;
}
}
- atomic_write_barrier ();
+ atomic_thread_fence_release ();
small_tables_initialized = 1;
#ifdef __GNU_LIBRARY__
small_tables_done:
__libc_lock_unlock(_ufc_tables_lock);
#endif
} else
- atomic_read_barrier ();
+ atomic_thread_fence_acquire ();
/*
* Create the sb tables:
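
This first hunk is the classic init-once pairing: the thread that builds the tables issues a release fence before setting small_tables_initialized, and a thread that finds the flag already set issues an acquire fence before reading the tables. A minimal standalone sketch of the same shape in ISO C11 (glibc's atomic_thread_fence_release/_acquire correspond to these fences; the names below are illustrative, not glibc's):

    #include <pthread.h>
    #include <stdatomic.h>

    static int table[1024];                 /* stands in for the DES tables */
    static atomic_int table_ready;
    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    static void
    table_init_once (void)
    {
      if (atomic_load_explicit (&table_ready, memory_order_relaxed) == 0)
        {
          pthread_mutex_lock (&table_lock);
          if (atomic_load_explicit (&table_ready, memory_order_relaxed) == 0)
            {
              for (int i = 0; i < 1024; i++)
                table[i] = i * i;           /* build the tables */
              /* Order the table stores before the flag store.  */
              atomic_thread_fence (memory_order_release);
              atomic_store_explicit (&table_ready, 1, memory_order_relaxed);
            }
          pthread_mutex_unlock (&table_lock);
        }
      else
        /* Pairs with the release fence above: order the flag load
           before the table reads that follow.  */
        atomic_thread_fence (memory_order_acquire);

      /* table[] may be read safely from here on.  */
    }
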
@@ -430,7 +430,7 @@ _dl_map_object_deps (struct link_map *map,
memcpy (&l_initfini[1], needed, nneeded * sizeof needed[0]);
memcpy (&l_initfini[nneeded + 1], l_initfini,
nneeded * sizeof needed[0]);
- atomic_write_barrier ();
+ atomic_thread_fence_release ();
l->l_initfini = l_initfini;
l->l_free_initfini = 1;
}
@@ -555,12 +555,12 @@ _dl_map_object_deps (struct link_map *map,
/* Terminate the list of dependencies. */
l_initfini[nlist] = NULL;
- atomic_write_barrier ();
+ atomic_thread_fence_release ();
map->l_initfini = l_initfini;
map->l_free_initfini = 1;
if (l_reldeps != NULL)
{
- atomic_write_barrier ();
+ atomic_thread_fence_release ();
void *old_l_reldeps = map->l_reldeps;
map->l_reldeps = l_reldeps;
_dl_scope_free (old_l_reldeps);
@@ -532,7 +532,7 @@ add_dependency (struct link_map *undef_map, struct link_map *map, int flags)
= atomic_forced_read (undef_map->l_reldeps);
/* Make sure l_reldeps is read before l_initfini. */
- atomic_read_barrier ();
+ atomic_thread_fence_acquire ();
/* Determine whether UNDEF_MAP already has a reference to MAP. First
look in the normal dependencies. */
@@ -697,7 +697,7 @@ marking %s [%lu] as NODELETE due to memory allocation failure\n",
l_reldepsact * sizeof (struct link_map *));
newp->list[l_reldepsact] = map;
newp->act = l_reldepsact + 1;
- atomic_write_barrier ();
+ atomic_thread_fence_release ();
void *old = undef_map->l_reldeps;
undef_map->l_reldeps = newp;
undef_map->l_reldepsmax = max;
@@ -708,7 +708,7 @@ marking %s [%lu] as NODELETE due to memory allocation failure\n",
else
{
undef_map->l_reldeps->list[l_reldepsact] = map;
- atomic_write_barrier ();
+ atomic_thread_fence_release ();
undef_map->l_reldeps->act = l_reldepsact + 1;
}
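
The dl-deps.c hunks above are the two halves of one release/acquire pair: the writers fully initialize a new dependency array (including its element count) before fencing and storing the pointer, and the reader in add_dependency loads the pointer, fences, and only then walks the array. The same protocol as a standalone ISO C11 sketch (types and names are made up for illustration):

    #include <stdatomic.h>
    #include <stddef.h>

    struct reldeps { size_t act; void *list[8]; };
    static struct reldeps *_Atomic reldeps;

    static void
    publish_dep (struct reldeps *newp, void *dep)
    {
      newp->list[0] = dep;
      newp->act = 1;
      /* Order the initialization stores before the pointer store.  */
      atomic_thread_fence (memory_order_release);
      atomic_store_explicit (&reldeps, newp, memory_order_relaxed);
    }

    static void *
    first_dep (void)
    {
      struct reldeps *p
        = atomic_load_explicit (&reldeps, memory_order_relaxed);
      /* Order the pointer load before the loads through it.  */
      atomic_thread_fence (memory_order_acquire);
      return (p != NULL && p->act > 0) ? p->list[0] : NULL;
    }
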
@@ -202,7 +202,7 @@ add_to_global_update (struct link_map *new)
assert (added <= ns->_ns_global_scope_pending_adds);
ns->_ns_global_scope_pending_adds -= added;
- atomic_write_barrier ();
+ atomic_thread_fence_release ();
ns->_ns_main_searchlist->r_nlist = new_nlist;
}
@@ -342,7 +342,7 @@ update_scopes (struct link_map *new)
might use the new last element and then use the garbage
at offset IDX+1. */
imap->l_scope[cnt + 1] = NULL;
- atomic_write_barrier ();
+ atomic_thread_fence_release ();
imap->l_scope[cnt] = &new->l_searchlist;
from_scope = cnt;
@@ -43,7 +43,7 @@ list_add (list_t *newp, list_t *head)
newp->next = head->next;
newp->prev = head;
head->next->prev = newp;
- atomic_write_barrier ();
+ atomic_thread_fence_release ();
head->next = newp;
}
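
list_add applies the same publication idea to an intrusive list: the node's own links (and head->next->prev) are written first, and only after the fence does the store to head->next make the node reachable to a forward-walking reader. A simplified C11 sketch with forward links only (a lock-free reader would pair each next load with an acquire fence before dereferencing):

    #include <stdatomic.h>
    #include <stddef.h>

    struct node { struct node *_Atomic next; int data; };

    static void
    node_add_after (struct node *head, struct node *newp, int data)
    {
      newp->data = data;
      atomic_store_explicit (&newp->next,
                             atomic_load_explicit (&head->next,
                                                   memory_order_relaxed),
                             memory_order_relaxed);
      /* NEWP must be fully initialized before it becomes reachable.  */
      atomic_thread_fence (memory_order_release);
      atomic_store_explicit (&head->next, newp, memory_order_relaxed);
    }
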
@@ -111,7 +111,7 @@ static mstate free_list;
malloc_state objects.
Read access to the next member is supposed to synchronize with the
- atomic_write_barrier and the write to the next member in
+ atomic_thread_fence_release and the write to the next member in
_int_new_arena. This suffers from data races; see the FIXME
comments in _int_new_arena and reused_arena.
@@ -770,7 +770,7 @@ _int_new_arena (size_t size)
/* FIXME: The barrier is an attempt to synchronize with read access
in reused_arena, which does not acquire list_lock while
traversing the list. */
- atomic_write_barrier ();
+ atomic_thread_fence_release ();
main_arena.next = a;
__libc_lock_unlock (list_lock);
@@ -2543,14 +2543,14 @@ aiocb64}, since the LFS transparently replaces the old interface.
@c deallocate_stack @asulock @ascuheap @aculock @acsmem
@c lll_lock (state_cache_lock) @asulock @aculock
@c stack_list_del ok
-@c atomic_write_barrier ok
+@c atomic_thread_fence_release ok
@c list_del ok
-@c atomic_write_barrier ok
+@c atomic_thread_fence_release ok
@c queue_stack @ascuheap @acsmem
@c stack_list_add ok
-@c atomic_write_barrier ok
+@c atomic_thread_fence_release ok
@c list_add ok
-@c atomic_write_barrier ok
+@c atomic_thread_fence_release ok
@c free_stacks @ascuheap @acsmem
@c list_for_each_prev_safe ok
@c list_entry ok
@@ -395,7 +395,7 @@ this function is in @file{stdlib.h}.
@c mutex_init ok
@c mutex_lock (just-created mutex) ok, returns locked
@c mutex_lock (list_lock) dup @asulock @aculock
-@c atomic_write_barrier ok
+@c atomic_thread_fence_release ok
@c mutex_unlock (list_lock) @aculock
@c catomic_decrement ok
@c reused_arena @asulock @aculock
@@ -947,7 +947,7 @@ using @code{atexit} or @code{on_exit}.
@c __libc_lock_lock @asulock @aculock
@c calloc dup @ascuheap @acsmem
@c __libc_lock_unlock @aculock
-@c atomic_write_barrier dup ok
+@c atomic_thread_fence_release dup ok
The @code{atexit} function registers the function @var{function} to be
called at normal program termination. The @var{function} is called with
no arguments.
@@ -961,7 +961,7 @@ the function cannot be registered.
@safety{@prelim{}@mtsafe{}@asunsafe{@ascuheap{} @asulock{}}@acunsafe{@aculock{} @acsmem{}}}
@c on_exit @ascuheap @asulock @aculock @acsmem
@c new_exitfn dup @ascuheap @asulock @aculock @acsmem
-@c atomic_write_barrier dup ok
+@c atomic_thread_fence_release dup ok
This function is a somewhat more powerful variant of @code{atexit}. It
accepts two arguments, a function @var{function} and an arbitrary
pointer @var{arg}. At normal program termination, the @var{function} is
@@ -27,11 +27,11 @@ __nptl_stack_list_del (list_t *elem)
{
GL (dl_in_flight_stack) = (uintptr_t) elem;
- atomic_write_barrier ();
+ atomic_thread_fence_release ();
list_del (elem);
- atomic_write_barrier ();
+ atomic_thread_fence_release ();
GL (dl_in_flight_stack) = 0;
}
@@ -42,11 +42,11 @@ __nptl_stack_list_add (list_t *elem, list_t *list)
{
GL (dl_in_flight_stack) = (uintptr_t) elem | 1;
- atomic_write_barrier ();
+ atomic_thread_fence_release ();
list_add (elem, list);
- atomic_write_barrier ();
+ atomic_thread_fence_release ();
GL (dl_in_flight_stack) = 0;
}
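
The two nptl-stack.c functions bracket a non-atomic list manipulation with an in-flight marker: the marker is published before the list is touched, and cleared only after the list is consistent again, each time with a release fence ordering the stores. An inspector that sees a zero marker (e.g. the post-fork stack reclamation) can then trust the list. A standalone sketch of that bracketing (the mutate callback stands in for list_del/list_add; names are illustrative):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uintptr_t in_flight;

    static void
    guarded_update (void (*mutate) (void *), void *arg, uintptr_t marker)
    {
      atomic_store_explicit (&in_flight, marker, memory_order_relaxed);
      /* The marker becomes visible before the list is in flux.  */
      atomic_thread_fence (memory_order_release);
      mutate (arg);
      /* The list is consistent again before the marker is cleared.  */
      atomic_thread_fence (memory_order_release);
      atomic_store_explicit (&in_flight, 0, memory_order_relaxed);
    }
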
@@ -113,7 +113,7 @@ __pthread_mutex_setprioceiling (pthread_mutex_t *mutex, int prioceiling,
newlock = (mutex->__data.__lock & ~PTHREAD_MUTEX_PRIO_CEILING_MASK);
mutex->__data.__lock = newlock
| (prioceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT);
- atomic_full_barrier ();
+ atomic_thread_fence_seq_cst ();
futex_wake ((unsigned int *)&mutex->__data.__lock, INT_MAX,
PTHREAD_MUTEX_PSHARED (mutex));
@@ -90,7 +90,7 @@ __old_sem_post (sem_t *sem)
/* We need to synchronize with consumers of this token, so the atomic
increment must have release MO semantics. */
- atomic_write_barrier ();
+ atomic_thread_fence_release ();
atomic_fetch_add_release (futex, 1);
/* We always have to assume it is a shared semaphore. */
futex_wake (futex, 1, LLL_SHARED);
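
One point worth noting on the __old_sem_post hunk: in C11, a release fence sequenced before a relaxed read-modify-write already makes that RMW publish everything before the fence, so the fence-plus-relaxed form and a single release-MO fetch-and-add are each sufficient on their own. A sketch of both spellings (illustrative, not glibc's sem_t layout):

    #include <stdatomic.h>

    static atomic_uint sem_value;

    static void
    post_with_fence (void)
    {
      atomic_thread_fence (memory_order_release);
      atomic_fetch_add_explicit (&sem_value, 1, memory_order_relaxed);
    }

    static void
    post_with_release_rmw (void)
    {
      atomic_fetch_add_explicit (&sem_value, 1, memory_order_release);
    }

A consumer that observes the incremented value through a relaxed load must follow it with an acquire fence (or use an acquire load) to complete the pairing.
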
@@ -197,7 +197,7 @@ __qsort_r (void *b, size_t n, size_t s, __compar_d_fn_t cmp, void *arg)
phys_pages /= 4;
/* Make sure phys_pages is written to memory. */
- atomic_write_barrier ();
+ atomic_thread_fence_release ();
pagesize = __sysconf (_SC_PAGESIZE);
}
@@ -108,7 +108,7 @@ typedef struct
do \
{ \
THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \
- atomic_write_barrier (); \
+ atomic_thread_fence_release (); \
} \
while (0)
@@ -105,7 +105,7 @@ typedef struct
do \
{ \
THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \
- atomic_write_barrier (); \
+ atomic_thread_fence_release (); \
} \
while (0)
@@ -113,7 +113,7 @@ typedef struct
do \
{ \
THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \
- atomic_write_barrier (); \
+ atomic_thread_fence_release (); \
} \
while (0)
@@ -99,7 +99,7 @@ typedef struct
do \
{ \
THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \
- atomic_write_barrier (); \
+ atomic_thread_fence_release (); \
} \
while (0)
@@ -128,7 +128,7 @@ typedef struct
do \
{ \
THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \
- atomic_write_barrier (); \
+ atomic_thread_fence_release (); \
} \
while (0)
@@ -22,18 +22,6 @@
#include <atomic.h>
-#ifndef atomic_full_barrier
-# define atomic_full_barrier() __asm ("" ::: "memory")
-#endif
-
-#ifndef atomic_read_barrier
-# define atomic_read_barrier() atomic_full_barrier ()
-#endif
-
-#ifndef atomic_write_barrier
-# define atomic_write_barrier() atomic_full_barrier ()
-#endif
-
#ifndef DEFAULT_TOP_PAD
# define DEFAULT_TOP_PAD 131072
#endif
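
With the last users converted, the local fallback definitions can go: <atomic.h> is assumed to provide the C11-style fences on every target. They reduce to the GCC/Clang builtin, roughly as follows (illustrative; see glibc's include/atomic.h for the authoritative definitions):

    #define atomic_thread_fence_acquire() \
      __atomic_thread_fence (__ATOMIC_ACQUIRE)
    #define atomic_thread_fence_release() \
      __atomic_thread_fence (__ATOMIC_RELEASE)
    #define atomic_thread_fence_seq_cst() \
      __atomic_thread_fence (__ATOMIC_SEQ_CST)
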
@@ -371,7 +371,7 @@ _dl_lookup_address (const void *address)
/* First load the relocation offset. */
reloc_arg = (ElfW(Word)) desc[1];
- atomic_full_barrier();
+ atomic_thread_fence_seq_cst ();
/* Then load first word of candidate descriptor. It should be a pointer
with word alignment and point to memory that can be read. */
@@ -136,7 +136,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
/* Need to ensure that the gp is visible before the code
entry point is updated */
rfdesc[1] = value.gp;
- atomic_full_barrier();
+ atomic_thread_fence_seq_cst ();
rfdesc[0] = value.ip;
}
else
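
The two hppa hunks order accesses to a two-word function descriptor: the writer in elf_machine_fixup_plt stores the gp word, fences, then stores the entry point, so a thread that calls through the new entry point also sees the matching gp; the _dl_lookup_address hunk likewise separates its two descriptor loads with a fence. A standalone C11 sketch of that discipline (release/acquire fences would suffice for the pairing; the full fence mirrors what atomic_full_barrier provided):

    #include <stdatomic.h>
    #include <stdint.h>

    /* A function descriptor: [0] = entry point, [1] = global pointer.  */
    static _Atomic uintptr_t fdesc[2];

    static void
    fdesc_update (uintptr_t ip, uintptr_t gp)
    {
      atomic_store_explicit (&fdesc[1], gp, memory_order_relaxed);
      /* The new gp must be visible before the new entry point.  */
      atomic_thread_fence (memory_order_seq_cst);
      atomic_store_explicit (&fdesc[0], ip, memory_order_relaxed);
    }

    static void
    fdesc_load (uintptr_t *ip, uintptr_t *gp)
    {
      *ip = atomic_load_explicit (&fdesc[0], memory_order_relaxed);
      atomic_thread_fence (memory_order_seq_cst);
      *gp = atomic_load_explicit (&fdesc[1], memory_order_relaxed);
    }
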
@@ -133,7 +133,7 @@ static inline void __set_cr27(struct pthread *cr27)
do \
{ \
THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \
- atomic_write_barrier (); \
+ atomic_thread_fence_release (); \
} \
while (0)
@@ -33,7 +33,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
{
ASSERT_TYPE_SIZE (pthread_once_t, __SIZEOF_PTHREAD_ONCE_T);
- atomic_full_barrier ();
+ atomic_thread_fence_seq_cst ();
if (once_control->__run == 0)
{
__pthread_spin_wait (&once_control->__lock);
@@ -44,7 +44,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
init_routine ();
pthread_cleanup_pop (0);
- atomic_full_barrier ();
+ atomic_thread_fence_seq_cst ();
once_control->__run = 1;
}
@@ -157,7 +157,7 @@ register struct pthread *__thread_self __asm__("r13");
do \
{ \
THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \
- atomic_write_barrier (); \
+ atomic_thread_fence_release (); \
} \
while (0)
@@ -129,7 +129,7 @@ typedef struct
do \
{ \
THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \
- atomic_write_barrier (); \
+ atomic_thread_fence_release (); \
} \
while (0)
@@ -132,7 +132,7 @@ extern void * __m68k_read_tp (void);
do \
{ \
THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \
- atomic_write_barrier (); \
+ atomic_thread_fence_release (); \
} \
while (0)
@@ -26,7 +26,7 @@
int
__pthread_mutex_destroy (pthread_mutex_t *mtxp)
{
- atomic_read_barrier ();
+ atomic_thread_fence_acquire ();
if (*(volatile unsigned int *) &mtxp->__lock != 0)
return EBUSY;
@@ -54,7 +54,7 @@
if (ret == EOWNERDEAD) \
{ \
mtxp->__lock = mtxp->__lock | LLL_DEAD_OWNER; \
- atomic_write_barrier (); \
+ atomic_thread_fence_release (); \
} \
} \
} \
@@ -110,7 +110,7 @@ typedef struct
do \
{ \
THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \
- atomic_write_barrier (); \
+ atomic_thread_fence_release (); \
} \
while (0)
@@ -160,7 +160,7 @@ typedef struct
do \
{ \
THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \
- atomic_write_barrier (); \
+ atomic_thread_fence_release (); \
} \
while (0)
@@ -140,7 +140,7 @@ register struct pthread *__thread_self __asm__("r23");
do \
{ \
THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \
- atomic_write_barrier (); \
+ atomic_thread_fence_release (); \
} \
while (0)
@@ -175,7 +175,7 @@ register tcbhead_t *__thread_self __asm__("r10");
do \
{ \
THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \
- atomic_write_barrier (); \
+ atomic_thread_fence_release (); \
} \
while (0)
@@ -224,7 +224,7 @@ typedef struct
do \
{ \
THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \
- atomic_write_barrier (); \
+ atomic_thread_fence_release (); \
} \
while (0)
@@ -123,7 +123,7 @@ typedef struct
do \
{ \
THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \
- atomic_write_barrier (); \
+ atomic_thread_fence_release (); \
} \
while (0)
@@ -167,7 +167,7 @@ typedef struct
do \
{ \
THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \
- atomic_write_barrier (); \
+ atomic_thread_fence_release (); \
} \
while (0)
@@ -139,7 +139,7 @@ typedef struct
do \
{ \
THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \
- atomic_write_barrier (); \
+ atomic_thread_fence_release (); \
} \
while (0)
@@ -140,7 +140,7 @@ register struct pthread *__thread_self __asm__("%g7");
do \
{ \
THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \
- atomic_write_barrier (); \
+ atomic_thread_fence_release (); \
} \
while (0)