[4/7] Remove atomic_and/or

Message ID: AM5PR0801MB166830503363075BD787455B83809@AM5PR0801MB1668.eurprd08.prod.outlook.com
State: Dropped
Series: [1/7] Use atomic_exchange_release/acquire

Checks

Context: dj/TryBot-apply_patch
Check: fail
Description: Patch failed to apply to master at the time it was sent

Commit Message

Wilco Dijkstra July 6, 2022, 3:15 p.m. UTC
Replace the four uses of atomic_and and atomic_or with atomic_fetch_and_acquire
and atomic_fetch_or_acquire.
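
Both replacements are glibc-internal fetch-and-modify macros; with compiler
builtins available they should reduce to the corresponding GCC __atomic
builtins with acquire ordering, returning the old value rather than discarding
it. A minimal standalone sketch of the pattern the patched call sites use
(the fetch_and_acquire/fetch_or_acquire wrappers and the futex constant below
are illustrative stand-ins, not the glibc definitions):

#include <stdio.h>

/* Illustrative stand-ins for the glibc macros, written directly in terms of
   the GCC __atomic builtins with acquire ordering.  */
#define fetch_and_acquire(mem, mask) \
  __atomic_fetch_and ((mem), (mask), __ATOMIC_ACQUIRE)
#define fetch_or_acquire(mem, mask) \
  __atomic_fetch_or ((mem), (mask), __ATOMIC_ACQUIRE)

#define FUTEX_OWNER_DIED 0x40000000  /* Linux robust-futex owner-died bit.  */

int
main (void)
{
  unsigned int lock = 1 | FUTEX_OWNER_DIED;

  /* Same shape as the pthread_mutex_*lock hunks: clear FUTEX_OWNER_DIED
     atomically.  The returned old value is simply ignored, which is all
     the removed atomic_and provided anyway.  */
  unsigned int old = fetch_and_acquire (&lock, ~FUTEX_OWNER_DIED);

  /* And the pthread_create.c shape: set the bit before waking waiters.  */
  fetch_or_acquire (&lock, FUTEX_OWNER_DIED);

  printf ("old=%#x now=%#x\n", old, lock);  /* old=0x40000001 now=0x40000001 */
  return 0;
}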

---
  

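For targets that do not provide these operations in their atomic-machine.h,
the generic atomic_and/atomic_or removed here were compare-and-swap loops, so
the fetch-based replacements do the same work and merely hand back the old
value as well. A rough standalone equivalent of such a loop, assuming the GCC
__atomic builtins (function name and test values are illustrative, not taken
from glibc):

#include <stdbool.h>
#include <stdio.h>

/* CAS-loop version of an acquire fetch-and, roughly what the removed
   generic atomic_and expanded to (minus returning the old value).  */
static unsigned int
cas_loop_fetch_and (unsigned int *mem, unsigned int mask)
{
  unsigned int old = __atomic_load_n (mem, __ATOMIC_RELAXED);
  /* On failure the builtin refreshes OLD with the current value, so the
     loop retries with an up-to-date OLD & MASK.  */
  while (!__atomic_compare_exchange_n (mem, &old, old & mask,
				       false, __ATOMIC_ACQUIRE,
				       __ATOMIC_RELAXED))
    ;
  return old;
}

int
main (void)
{
  unsigned int v = 0xff;
  unsigned int prev = cas_loop_fetch_and (&v, 0x0f);
  printf ("prev=%#x now=%#x\n", prev, v);  /* prev=0xff now=0xf */
  return 0;
}
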
Patch

diff --git a/include/atomic.h b/include/atomic.h
index ed1fc38e7569fcbdd0473ab6f69956a44d62354f..fd1a1eb0f87d288251d176d69246cff2d3422af7 100644
--- a/include/atomic.h
+++ b/include/atomic.h
@@ -255,23 +255,6 @@ 
 #endif
 
 
-/* Atomically *mem &= mask.  */
-#ifndef atomic_and
-# define atomic_and(mem, mask) \
-  do {									      \
-    __typeof (*(mem)) __atg15_old;					      \
-    __typeof (mem) __atg15_memp = (mem);				      \
-    __typeof (*(mem)) __atg15_mask = (mask);				      \
-									      \
-    do									      \
-      __atg15_old = (*__atg15_memp);					      \
-    while (__builtin_expect						      \
-	   (atomic_compare_and_exchange_bool_acq (__atg15_memp,		      \
-						  __atg15_old & __atg15_mask, \
-						  __atg15_old), 0));	      \
-  } while (0)
-#endif
-
 /* Atomically *mem &= mask and return the old value of *mem.  */
 #ifndef atomic_and_val
 # define atomic_and_val(mem, mask) \
@@ -289,23 +272,6 @@ 
      __atg16_old; })
 #endif
 
-/* Atomically *mem |= mask and return the old value of *mem.  */
-#ifndef atomic_or
-# define atomic_or(mem, mask) \
-  do {									      \
-    __typeof (*(mem)) __atg17_old;					      \
-    __typeof (mem) __atg17_memp = (mem);				      \
-    __typeof (*(mem)) __atg17_mask = (mask);				      \
-									      \
-    do									      \
-      __atg17_old = (*__atg17_memp);					      \
-    while (__builtin_expect						      \
-	   (atomic_compare_and_exchange_bool_acq (__atg17_memp,		      \
-						  __atg17_old | __atg17_mask, \
-						  __atg17_old), 0));	      \
-  } while (0)
-#endif
-
 /* Atomically *mem |= mask and return the old value of *mem.  */
 #ifndef atomic_or_val
 # define atomic_or_val(mem, mask) \
diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c
index 4b7f3edc384748f300ca935ad878eb0e3547e163..1e6b385787aab5fa2f452de0cd2eae2b5f0fba83 100644
--- a/nptl/pthread_create.c
+++ b/nptl/pthread_create.c
@@ -539,7 +539,7 @@  start_thread (void *arg)
 # endif
 	  this->__list.__next = NULL;
 
-	  atomic_or (&this->__lock, FUTEX_OWNER_DIED);
+	  atomic_fetch_or_acquire (&this->__lock, FUTEX_OWNER_DIED);
 	  futex_wake ((unsigned int *) &this->__lock, 1,
 		      /* XYZ */ FUTEX_SHARED);
 	}
diff --git a/nptl/pthread_mutex_lock.c b/nptl/pthread_mutex_lock.c
index 6e767a87247063c0ac84242ef13e72af79021104..439b1e6391c50d5922dec6c48e7f2a2a632a89d9 100644
--- a/nptl/pthread_mutex_lock.c
+++ b/nptl/pthread_mutex_lock.c
@@ -462,7 +462,7 @@  __pthread_mutex_lock_full (pthread_mutex_t *mutex)
 
 	if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
 	  {
-	    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
+	    atomic_fetch_and_acquire (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
 
 	    /* We got the mutex.  */
 	    mutex->__data.__count = 1;
diff --git a/nptl/pthread_mutex_timedlock.c b/nptl/pthread_mutex_timedlock.c
index 0fcaabfb482546fd6f1f9cc4b13edc82f6e6796c..af70a60528cb101c8e52d4165950ee0d11f6f895 100644
--- a/nptl/pthread_mutex_timedlock.c
+++ b/nptl/pthread_mutex_timedlock.c
@@ -392,7 +392,7 @@  __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
 
 	if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
 	  {
-	    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
+	    atomic_fetch_and_acquire (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
 
 	    /* We got the mutex.  */
 	    mutex->__data.__count = 1;
diff --git a/nptl/pthread_mutex_trylock.c b/nptl/pthread_mutex_trylock.c
index 8a7de8e598803f606899fe1c9b8775bc24dd14ec..50524942a76c753ce4add20c35dfe7f659a1908b 100644
--- a/nptl/pthread_mutex_trylock.c
+++ b/nptl/pthread_mutex_trylock.c
@@ -308,7 +308,7 @@  ___pthread_mutex_trylock (pthread_mutex_t *mutex)
 
 	if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
 	  {
-	    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
+	    atomic_fetch_and_acquire (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
 
 	    /* We got the mutex.  */
 	    mutex->__data.__count = 1;
diff --git a/sysdeps/s390/atomic-machine.h b/sysdeps/s390/atomic-machine.h
index db31e377970c4ab6285ef65fb21419db7c6ca373..d2fc3cf240888ca3569c6d3b1287cc87209cab89 100644
--- a/sysdeps/s390/atomic-machine.h
+++ b/sysdeps/s390/atomic-machine.h
@@ -80,27 +80,3 @@ 
 # define atomic_exchange_and_add_rel(mem, operand)			\
   ({ __atomic_check_size((mem));					\
   __atomic_fetch_add ((mem), (operand), __ATOMIC_RELEASE); })
-
-/* Atomically *mem |= mask and return the old value of *mem.  */
-/* The gcc builtin uses load-and-or instruction on z196 zarch and higher cpus
-   instead of a loop with compare-and-swap instruction.  */
-#define atomic_or_val(mem, operand)					\
-  ({ __atomic_check_size((mem));					\
-  __atomic_fetch_or ((mem), (operand), __ATOMIC_ACQUIRE); })
-/* Atomically *mem |= mask.  */
-#define atomic_or(mem, mask)			\
-  do {						\
-    atomic_or_val (mem, mask);			\
-  } while (0)
-
-/* Atomically *mem &= mask and return the old value of *mem.  */
-/* The gcc builtin uses load-and-and instruction on z196 zarch and higher cpus
-   instead of a loop with compare-and-swap instruction.  */
-#define atomic_and_val(mem, operand)					\
-  ({ __atomic_check_size((mem));					\
-  __atomic_fetch_and ((mem), (operand), __ATOMIC_ACQUIRE); })
-/* Atomically *mem &= mask.  */
-#define atomic_and(mem, mask)			\
-  do {						\
-    atomic_and_val (mem, mask);			\
-  } while (0)
diff --git a/sysdeps/x86/atomic-machine.h b/sysdeps/x86/atomic-machine.h
index 6adc219bf69f1507624618ed931dad11fea4e150..2abab58628e88bf2f5fc980d2de6b0dd556f6cd4 100644
--- a/sysdeps/x86/atomic-machine.h
+++ b/sysdeps/x86/atomic-machine.h
@@ -292,54 +292,6 @@ 
      __result; })
 
 
-#define __arch_and_body(lock, mem, mask) \
-  do {									      \
-    if (sizeof (*mem) == 1)						      \
-      __asm __volatile (lock "andb %b1, %0"				      \
-			: "=m" (*mem)					      \
-			: IBR_CONSTRAINT (mask), "m" (*mem));		      \
-    else if (sizeof (*mem) == 2)					      \
-      __asm __volatile (lock "andw %w1, %0"				      \
-			: "=m" (*mem)					      \
-			: "ir" (mask), "m" (*mem));			      \
-    else if (sizeof (*mem) == 4)					      \
-      __asm __volatile (lock "andl %1, %0"				      \
-			: "=m" (*mem)					      \
-			: "ir" (mask), "m" (*mem));			      \
-    else if (__HAVE_64B_ATOMICS)					      \
-      __asm __volatile (lock "andq %q1, %0"				      \
-			: "=m" (*mem)					      \
-			: "ir" (mask), "m" (*mem));			      \
-    else								      \
-      __atomic_link_error ();						      \
-  } while (0)
-
-#define atomic_and(mem, mask) __arch_and_body (LOCK_PREFIX, mem, mask)
-
-#define __arch_or_body(lock, mem, mask) \
-  do {									      \
-    if (sizeof (*mem) == 1)						      \
-      __asm __volatile (lock "orb %b1, %0"				      \
-			: "=m" (*mem)					      \
-			: IBR_CONSTRAINT (mask), "m" (*mem));		      \
-    else if (sizeof (*mem) == 2)					      \
-      __asm __volatile (lock "orw %w1, %0"				      \
-			: "=m" (*mem)					      \
-			: "ir" (mask), "m" (*mem));			      \
-    else if (sizeof (*mem) == 4)					      \
-      __asm __volatile (lock "orl %1, %0"				      \
-			: "=m" (*mem)					      \
-			: "ir" (mask), "m" (*mem));			      \
-    else if (__HAVE_64B_ATOMICS)					      \
-      __asm __volatile (lock "orq %q1, %0"				      \
-			: "=m" (*mem)					      \
-			: "ir" (mask), "m" (*mem));			      \
-    else								      \
-      __atomic_link_error ();						      \
-  } while (0)
-
-#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)
-
 /* We don't use mfence because it is supposedly slower due to having to
    provide stronger guarantees (e.g., regarding self-modifying code).  */
 #define atomic_full_barrier() \