[v2,1/6] nptl: Fix pthread_cond_signal missing a sleeper [BZ #25847]

Message ID 20210908025239.1326480-2-malteskarupke@web.de
State Superseded
Headers
Series [v2,1/6] nptl: Fix pthread_cond_signal missing a sleeper [BZ #25847] |

Checks

Context Check Description
dj/TryBot-apply_patch success Patch applied to master at the time it was sent

Commit Message

Malte Skarupke Sept. 8, 2021, 2:52 a.m. UTC
  There was a rare bug in pthread_cond_wait's handling of the case when
a signal was stolen because a waiter took a long time to leave
pthread_cond_wait.

I wrote about the bug here:
https://probablydance.com/2020/10/31/using-tla-in-the-real-world-to-understand-a-glibc-bug/

The bug was subtle and only happened in an edge-case of an edge-case
so rather than fixing it, I decided to remove the outer edge-case:
By broadening the scope of grefs, stealing of signals becomes
impossible. A signaling thread will always wait for all waiters to
leave pthread_cond_wait before closing a group, so now no waiter from
the past can come back and steal a signal from a future group.

This is patch 1/6; it contains the minimal amount of changes
necessary to fix the bug. This leads to an unnecessary number of
atomic operations, but the other patches in this series will undo
most of that damage.
---
 nptl/pthread_cond_wait.c | 39 ++++++++++++++++++---------------------
 1 file changed, 18 insertions(+), 21 deletions(-)

--
2.25.1
  

Patch

diff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c
index dc8c511f1a..cb674e2634 100644
--- a/nptl/pthread_cond_wait.c
+++ b/nptl/pthread_cond_wait.c
@@ -409,6 +409,12 @@  __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
      we only need to synchronize when decrementing the reference count.  */
   unsigned int flags = atomic_fetch_add_relaxed (&cond->__data.__wrefs, 8);
   int private = __condvar_get_private (flags);
+  /* Acquire a group reference and use acquire MO for that so that we
+     synchronize with the dummy read-modify-write in
+     __condvar_quiesce_and_switch_g1 if we read from the same group.  This will
+     make us see the closed flag on __g_signals that designates a concurrent
+     attempt to reuse the group's slot. */
+  atomic_fetch_add_acquire (cond->__data.__g_refs + g, 2);

   /* Now that we are registered as a waiter, we can release the mutex.
      Waiting on the condvar must be atomic with releasing the mutex, so if
@@ -420,6 +426,7 @@  __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
   err = __pthread_mutex_unlock_usercnt (mutex, 0);
   if (__glibc_unlikely (err != 0))
     {
+      __condvar_dec_grefs (cond, g, private);
       __condvar_cancel_waiting (cond, seq, g, private);
       __condvar_confirm_wakeup (cond, private);
       return err;
@@ -471,24 +478,14 @@  __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
 	    break;

 	  /* No signals available after spinning, so prepare to block.
-	     We first acquire a group reference and use acquire MO for that so
-	     that we synchronize with the dummy read-modify-write in
-	     __condvar_quiesce_and_switch_g1 if we read from that.  In turn,
-	     in this case this will make us see the closed flag on __g_signals
-	     that designates a concurrent attempt to reuse the group's slot.
-	     We use acquire MO for the __g_signals check to make the
-	     __g1_start check work (see spinning above).
-	     Note that the group reference acquisition will not mask the
-	     release MO when decrementing the reference count because we use
-	     an atomic read-modify-write operation and thus extend the release
-	     sequence.  */
-	  atomic_fetch_add_acquire (cond->__data.__g_refs + g, 2);
+	     First check the closed flag on __g_signals that designates a
+	     concurrent attempt to reuse the group's slot. We use acquire MO for
+	     the __g_signals check to make the __g1_start check work (see
+	     above).  */
 	  if (((atomic_load_acquire (cond->__data.__g_signals + g) & 1) != 0)
 	      || (seq < (__condvar_load_g1_start_relaxed (cond) >> 1)))
 	    {
-	      /* Our group is closed.  Wake up any signalers that might be
-		 waiting.  */
-	      __condvar_dec_grefs (cond, g, private);
+	      /* Our group is closed.  */
 	      goto done;
 	    }

@@ -516,10 +513,8 @@  __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
 		 the lock during cancellation is not possible.  */
 	      __condvar_cancel_waiting (cond, seq, g, private);
 	      result = err;
-	      goto done;
+	      goto acquire_lock;
 	    }
-	  else
-	    __condvar_dec_grefs (cond, g, private);

 	  /* Reload signals.  See above for MO.  */
 	  signals = atomic_load_acquire (cond->__data.__g_signals + g);
@@ -598,11 +593,13 @@  __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,

  done:

-  /* Confirm that we have been woken.  We do that before acquiring the mutex
-     to allow for execution of pthread_cond_destroy while having acquired the
-     mutex.  */
+  /* Decrement group reference count and confirm that we have been woken.  We do
+     that before acquiring the mutex to allow for execution of
+     pthread_cond_destroy while having acquired the mutex.  */
+  __condvar_dec_grefs (cond, g, private);
   __condvar_confirm_wakeup (cond, private);

+ acquire_lock:
   /* Woken up; now re-acquire the mutex.  If this doesn't fail, return RESULT,
      which is set to ETIMEDOUT if a timeout occured, or zero otherwise.  */
   err = __pthread_mutex_cond_lock (mutex);