nptl: Add EOVERFLOW checks for futex calls

Message ID 20201126203954.443334-1-adhemerval.zanella@linaro.org
State Superseded
Series nptl: Add EOVERFLOW checks for futex calls

Commit Message

Adhemerval Zanella Nov. 26, 2020, 8:39 p.m. UTC
  Some futex-internal calls require an additional check for EOVERFLOW (as
indicated by [1] [2]).  For both the mutex and rwlock code, EOVERFLOW is
handled as ETIMEDOUT, since it indicates to the caller that the blocking
operation could not be performed.

For mutexes it avoids a possible issue where PTHREAD_MUTEX_ROBUST_* might
assume EOVERFLOW indicates the futex call has succeeded, and for
PTHREAD_MUTEX_PP_* it avoids a potential infinite busy loop.  For rwlocks
it also avoids potential infinite busy loops.

Checked on x86_64-linux-gnu and i686-linux-gnu, although EOVERFLOW cannot
be triggered with current usage (all timeouts on 32-bit architectures with
32-bit time_t support necessarily fit in a 32-bit time_t).
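
To make the failure mode concrete, here is a minimal, self-contained sketch
(hypothetical helper names, not the futex-internal API) of why a caller has
to treat EOVERFLOW like ETIMEDOUT: once the timeout cannot be handed to the
kernel, retrying the wait would just spin.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the 64-bit-time futex wait: if the kernel
   only accepts 32-bit timeouts and the absolute timeout does not fit in
   a 32-bit time_t, the wait cannot be performed and EOVERFLOW is
   returned instead.  */
static int
futex_wait_sketch (int64_t abstime_sec)
{
  if (abstime_sec > INT32_MAX)
    return EOVERFLOW;
  /* ... real code would issue the futex syscall here ...  */
  return ETIMEDOUT;          /* Pretend the wait timed out.  */
}

/* The caller folds EOVERFLOW into the timeout path: either way the
   blocking operation could not be performed, so the error is reported
   instead of reloading the lock word and retrying, which could spin
   forever.  */
static int
timedlock_sketch (int64_t abstime_sec)
{
  for (;;)
    {
      int err = futex_wait_sketch (abstime_sec);
      if (err == ETIMEDOUT || err == EOVERFLOW)
        return err;
      /* ... otherwise reload the lock value and retry ...  */
    }
}

int
main (void)
{
  printf ("err = %d\n", timedlock_sketch ((int64_t) INT32_MAX + 1));
  return 0;
}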

[1] https://sourceware.org/pipermail/libc-alpha/2020-November/120079.html
[2] https://sourceware.org/pipermail/libc-alpha/2020-November/120080.html
---
 nptl/pthread_mutex_timedlock.c |  6 +++---
 nptl/pthread_rwlock_common.c   | 14 +++++++-------
 2 files changed, 10 insertions(+), 10 deletions(-)
  

Comments

Florian Weimer Nov. 27, 2020, 9:52 a.m. UTC | #1
* Adhemerval Zanella via Libc-alpha:

> Some futex-internal calls require an additional check for EOVERFLOW (as
> indicated by [1] [2]).  For both the mutex and rwlock code, EOVERFLOW is
> handled as ETIMEDOUT, since it indicates to the caller that the blocking
> operation could not be performed.
>
> For mutexes it avoids a possible issue where PTHREAD_MUTEX_ROBUST_* might
> assume EOVERFLOW indicates the futex call has succeeded, and for
> PTHREAD_MUTEX_PP_* it avoids a potential infinite busy loop.  For rwlocks
> it also avoids potential infinite busy loops.

What about the semaphore case (in __new_sem_wait_slow64)?

Thanks,
Florian
  
Adhemerval Zanella Nov. 27, 2020, 11:03 a.m. UTC | #2
On 27/11/2020 06:52, Florian Weimer wrote:
> * Adhemerval Zanella via Libc-alpha:
> 
>> Some futex-internal calls require an additional check for EOVERFLOW (as
>> indicated by [1] [2]).  For both the mutex and rwlock code, EOVERFLOW is
>> handled as ETIMEDOUT, since it indicates to the caller that the blocking
>> operation could not be performed.
>>
>> For mutexes it avoids a possible issue where PTHREAD_MUTEX_ROBUST_* might
>> assume EOVERFLOW indicates the futex call has succeeded, and for
>> PTHREAD_MUTEX_PP_* it avoids a potential infinite busy loop.  For rwlocks
>> it also avoids potential infinite busy loops.
> 
> What about the semaphore case (in __new_sem_wait_slow64)?

Indeed, I forgot to analyze the __futex_abstimed_wait_cancelable64 cases;
there are some usages as well that require adjustments.  I will update the
patch.
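
For reference, a similarly hedged sketch (again with stand-in names, not the
actual __new_sem_wait_slow64 code) of how a cancellable wait loop in a
semaphore-style slow path could propagate EOVERFLOW instead of retrying:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for the cancellable futex wait used by the
   semaphore slow path (the real helper is
   __futex_abstimed_wait_cancelable64).  */
static int
futex_wait_cancelable_sketch (void)
{
  return EOVERFLOW;          /* Pretend the timeout was not representable.  */
}

/* Sketch only: EOVERFLOW, like ETIMEDOUT, means the blocking operation
   could not be performed, so it is reported to the caller rather than
   being treated as a spurious wake-up and retried.  */
static int
sem_wait_slow_sketch (void)
{
  for (;;)
    {
      int err = futex_wait_cancelable_sketch ();
      if (err == ETIMEDOUT || err == EOVERFLOW)
        {
          errno = err;
          return -1;
        }
      /* ... otherwise re-check the semaphore count and retry ...  */
      return 0;
    }
}

int
main (void)
{
  if (sem_wait_slow_sketch () == -1)
    printf ("wait failed: errno = %d\n", errno);
  return 0;
}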
  

Patch

diff --git a/nptl/pthread_mutex_timedlock.c b/nptl/pthread_mutex_timedlock.c
index 74adffe790..6c72a36b2b 100644
--- a/nptl/pthread_mutex_timedlock.c
+++ b/nptl/pthread_mutex_timedlock.c
@@ -270,7 +270,7 @@  __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
 	      oldval, clockid, abstime,
 	      PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
 	  /* The futex call timed out.  */
-	  if (err == ETIMEDOUT)
+	  if (err == ETIMEDOUT || err == EOVERFLOW)
 	    return err;
 	  /* Reload current lock value.  */
 	  oldval = mutex->__data.__lock;
@@ -550,8 +550,8 @@  __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
 		    int e = __futex_abstimed_wait64 (
 		      (unsigned int *) &mutex->__data.__lock, ceilval | 2,
 		      clockid, abstime, PTHREAD_MUTEX_PSHARED (mutex));
-		    if (e == ETIMEDOUT)
-		      return ETIMEDOUT;
+		    if (e == ETIMEDOUT || e == EOVERFLOW)
+		      return e;
 		  }
 	      }
 	    while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
diff --git a/nptl/pthread_rwlock_common.c b/nptl/pthread_rwlock_common.c
index 4c9f582d3d..9ef432c474 100644
--- a/nptl/pthread_rwlock_common.c
+++ b/nptl/pthread_rwlock_common.c
@@ -334,7 +334,7 @@  __pthread_rwlock_rdlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
 		                                     private);
 		  /* We ignore EAGAIN and EINTR.  On time-outs, we can just
 		     return because we don't need to clean up anything.  */
-		  if (err == ETIMEDOUT)
+		  if (err == ETIMEDOUT || err == EOVERFLOW)
 		    return err;
 		}
 	      /* It makes sense to not break out of the outer loop here
@@ -460,7 +460,7 @@  __pthread_rwlock_rdlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
 	  int err = __futex_abstimed_wait64 (&rwlock->__data.__wrphase_futex,
 					     1 | PTHREAD_RWLOCK_FUTEX_USED,
 					     clockid, abstime, private);
-	  if (err == ETIMEDOUT)
+	  if (err == ETIMEDOUT || err == EOVERFLOW)
 	    {
 	      /* If we timed out, we need to unregister.  If no read phase
 		 has been installed while we waited, we can just decrement
@@ -479,7 +479,7 @@  __pthread_rwlock_rdlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
 		  if (atomic_compare_exchange_weak_relaxed
 		      (&rwlock->__data.__readers, &r,
 		       r - (1 << PTHREAD_RWLOCK_READER_SHIFT)))
-		    return ETIMEDOUT;
+		    return err;
 		  /* TODO Back-off.  */
 		}
 	      /* Use the acquire MO fence to mirror the steps taken in the
@@ -730,7 +730,7 @@  __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
 	  int err = __futex_abstimed_wait64 (&rwlock->__data.__writers_futex,
 					     1 | PTHREAD_RWLOCK_FUTEX_USED,
 					     clockid, abstime, private);
-	  if (err == ETIMEDOUT)
+	  if (err == ETIMEDOUT || err == EOVERFLOW)
 	    {
 	      if (prefer_writer)
 		{
@@ -758,7 +758,7 @@  __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
 		}
 	      /* We cleaned up and cannot have stolen another waiting writer's
 		 futex wake-up, so just return.  */
-	      return ETIMEDOUT;
+	      return err;
 	    }
 	  /* If we got interrupted (EINTR) or the futex word does not have the
 	     expected value (EAGAIN), retry after reloading __readers.  */
@@ -829,7 +829,7 @@  __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
 	  int err = __futex_abstimed_wait64 (&rwlock->__data.__wrphase_futex,
 					     PTHREAD_RWLOCK_FUTEX_USED,
 					     clockid, abstime, private);
-	  if (err == ETIMEDOUT)
+	  if (err == ETIMEDOUT || err == EOVERFLOW)
 	    {
 	      if (rwlock->__data.__flags != PTHREAD_RWLOCK_PREFER_READER_NP)
 		{
@@ -861,7 +861,7 @@  __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
 			      if ((wf & PTHREAD_RWLOCK_FUTEX_USED) != 0)
 				futex_wake (&rwlock->__data.__writers_futex,
 					    1, private);
-			      return ETIMEDOUT;
+			      return err;
 			    }
 			  /* TODO Back-off.  */
 			}