diff mbox series

[v2] nptl: Add EOVERFLOW checks for futex calls

Message ID 20201127113621.914717-1-adhemerval.zanella@linaro.org
State Committed
Headers show
Series [v2] nptl: Add EOVERFLOW checks for futex calls | expand

Commit Message

Adhemerval Zanella Nov. 27, 2020, 11:36 a.m. UTC
Changes from previous version:

  - Handle __futex_abstimed_wait_cancelable64.

--

Some futex-internal calls require an additional check for EOVERFLOW (as
indicated by [1] [2] [3]).  For both mutex and rwlock code, EOVERFLOW is
handled as ETIMEDOUT, since it indicates to the caller that the blocking
operation could not be issued.

For mutex it avoids a possible issue where PTHREAD_MUTEX_ROBUST_* might
assume EOVERFLOW indicates the futex call has succeeded, and for
PTHREAD_MUTEX_PP_* it avoids a potential infinite busy loop.  For rwlock
and semaphores, it also avoids potential infinite busy loops.

Checked on x86_64-linux-gnu and i686-linux-gnu, although EOVERFLOW
won't be possible with current usage (since all timeouts on 32-bit
architectures with 32-bit time_t support will be in the range of
32-bit time_t).

[1] https://sourceware.org/pipermail/libc-alpha/2020-November/120079.html
[2] https://sourceware.org/pipermail/libc-alpha/2020-November/120080.html
[3] https://sourceware.org/pipermail/libc-alpha/2020-November/120127.html
---
 nptl/pthread_cond_wait.c       |  4 ++--
 nptl/pthread_mutex_timedlock.c |  6 +++---
 nptl/pthread_rwlock_common.c   | 14 +++++++-------
 nptl/sem_waitcommon.c          |  2 +-
 4 files changed, 13 insertions(+), 13 deletions(-)

Comments

Lukasz Majewski Nov. 27, 2020, 1:07 p.m. UTC | #1
Hi Adhemerval,

> Changes from previous version:
> 
>   - Handle __futex_abstimed_wait_cancelable64.
> 
> --
> 
> Some futex-internal calls require additional check for EOVERFLOW (as
> indicated by [1] [2] [3]).  For both mutex and rwlock code, EOVERFLOW
> is handle as ETIMEDOUT; since it indicate to the caller that the
> blocking operation could not be issued.
> 
> For mutex it avoids a possible issue where PTHREAD_MUTEX_ROBUST_*
> might assume EOVERFLOW indicate futex has succeed, and for
> PTHREAD_MUTEX_PP_* it avoid a potential busy infinite loop.  For
> rwlock and semaphores, it also avoids potential busy infinite loops.
> 
> Checked on x86_64-linux-gnu and i686-linux-gnu, although EOVERFLOW
> won't be possible with current usage (since all timeouts on 32-bit
> architectures with 32-bit time_t support will be in the range of
> 32-bit time_t).
> 
> [1]
> https://sourceware.org/pipermail/libc-alpha/2020-November/120079.html
> [2]
> https://sourceware.org/pipermail/libc-alpha/2020-November/120080.html
> [3]
> https://sourceware.org/pipermail/libc-alpha/2020-November/120127.html
> --- nptl/pthread_cond_wait.c       |  4 ++--
> nptl/pthread_mutex_timedlock.c |  6 +++---
> nptl/pthread_rwlock_common.c   | 14 +++++++-------
> nptl/sem_waitcommon.c          |  2 +- 4 files changed, 13
> insertions(+), 13 deletions(-)
> 
> diff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c
> index 685dbca32f..02d11c61db 100644
> --- a/nptl/pthread_cond_wait.c
> +++ b/nptl/pthread_cond_wait.c
> @@ -506,7 +506,7 @@ __pthread_cond_wait_common (pthread_cond_t *cond,
> pthread_mutex_t *mutex, 
>  	  __pthread_cleanup_pop (&buffer, 0);
>  
> -	  if (__glibc_unlikely (err == ETIMEDOUT))
> +	  if (__glibc_unlikely (err == ETIMEDOUT || err ==
> EOVERFLOW)) {
>  	      __condvar_dec_grefs (cond, g, private);
>  	      /* If we timed out, we effectively cancel waiting.
> Note that @@ -515,7 +515,7 @@ __pthread_cond_wait_common
> (pthread_cond_t *cond, pthread_mutex_t *mutex,
> __condvar_quiesce_and_switch_g1 and us trying to acquire the lock
> during cancellation is not possible.  */ __condvar_cancel_waiting
> (cond, seq, g, private);
> -	      result = ETIMEDOUT;
> +	      result = err;
>  	      goto done;
>  	    }
>  	  else
> diff --git a/nptl/pthread_mutex_timedlock.c
> b/nptl/pthread_mutex_timedlock.c index 74adffe790..6c72a36b2b 100644
> --- a/nptl/pthread_mutex_timedlock.c
> +++ b/nptl/pthread_mutex_timedlock.c
> @@ -270,7 +270,7 @@ __pthread_mutex_clocklock_common (pthread_mutex_t
> *mutex, oldval, clockid, abstime,
>  	      PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
>  	  /* The futex call timed out.  */
> -	  if (err == ETIMEDOUT)
> +	  if (err == ETIMEDOUT || err == EOVERFLOW)
>  	    return err;
>  	  /* Reload current lock value.  */
>  	  oldval = mutex->__data.__lock;
> @@ -550,8 +550,8 @@ __pthread_mutex_clocklock_common (pthread_mutex_t
> *mutex, int e = __futex_abstimed_wait64 (
>  		      (unsigned int *) &mutex->__data.__lock,
> ceilval | 2, clockid, abstime, PTHREAD_MUTEX_PSHARED (mutex));
> -		    if (e == ETIMEDOUT)
> -		      return ETIMEDOUT;
> +		    if (e == ETIMEDOUT || e == EOVERFLOW)
> +		      return e;
>  		  }
>  	      }
>  	    while (atomic_compare_and_exchange_val_acq
> (&mutex->__data.__lock, diff --git a/nptl/pthread_rwlock_common.c
> b/nptl/pthread_rwlock_common.c index 4c9f582d3d..9ef432c474 100644
> --- a/nptl/pthread_rwlock_common.c
> +++ b/nptl/pthread_rwlock_common.c
> @@ -334,7 +334,7 @@ __pthread_rwlock_rdlock_full64 (pthread_rwlock_t
> *rwlock, clockid_t clockid, private);
>  		  /* We ignore EAGAIN and EINTR.  On time-outs, we
> can just return because we don't need to clean up anything.  */
> -		  if (err == ETIMEDOUT)
> +		  if (err == ETIMEDOUT || err == EOVERFLOW)
>  		    return err;
>  		}
>  	      /* It makes sense to not break out of the outer loop
> here @@ -460,7 +460,7 @@ __pthread_rwlock_rdlock_full64
> (pthread_rwlock_t *rwlock, clockid_t clockid, int err =
> __futex_abstimed_wait64 (&rwlock->__data.__wrphase_futex, 1 |
> PTHREAD_RWLOCK_FUTEX_USED, clockid, abstime, private);
> -	  if (err == ETIMEDOUT)
> +	  if (err == ETIMEDOUT || err == EOVERFLOW)
>  	    {
>  	      /* If we timed out, we need to unregister.  If no read
> phase has been installed while we waited, we can just decrement
> @@ -479,7 +479,7 @@ __pthread_rwlock_rdlock_full64 (pthread_rwlock_t
> *rwlock, clockid_t clockid, if (atomic_compare_exchange_weak_relaxed
>  		      (&rwlock->__data.__readers, &r,
>  		       r - (1 << PTHREAD_RWLOCK_READER_SHIFT)))
> -		    return ETIMEDOUT;
> +		    return err;
>  		  /* TODO Back-off.  */
>  		}
>  	      /* Use the acquire MO fence to mirror the steps taken
> in the @@ -730,7 +730,7 @@ __pthread_rwlock_wrlock_full64
> (pthread_rwlock_t *rwlock, clockid_t clockid, int err =
> __futex_abstimed_wait64 (&rwlock->__data.__writers_futex, 1 |
> PTHREAD_RWLOCK_FUTEX_USED, clockid, abstime, private);
> -	  if (err == ETIMEDOUT)
> +	  if (err == ETIMEDOUT || err == EOVERFLOW)
>  	    {
>  	      if (prefer_writer)
>  		{
> @@ -758,7 +758,7 @@ __pthread_rwlock_wrlock_full64 (pthread_rwlock_t
> *rwlock, clockid_t clockid, }
>  	      /* We cleaned up and cannot have stolen another
> waiting writer's futex wake-up, so just return.  */
> -	      return ETIMEDOUT;
> +	      return err;
>  	    }
>  	  /* If we got interrupted (EINTR) or the futex word does
> not have the expected value (EAGAIN), retry after reloading
> __readers.  */ @@ -829,7 +829,7 @@ __pthread_rwlock_wrlock_full64
> (pthread_rwlock_t *rwlock, clockid_t clockid, int err =
> __futex_abstimed_wait64 (&rwlock->__data.__wrphase_futex,
> PTHREAD_RWLOCK_FUTEX_USED, clockid, abstime, private);
> -	  if (err == ETIMEDOUT)
> +	  if (err == ETIMEDOUT || err == EOVERFLOW)
>  	    {
>  	      if (rwlock->__data.__flags !=
> PTHREAD_RWLOCK_PREFER_READER_NP) {
> @@ -861,7 +861,7 @@ __pthread_rwlock_wrlock_full64 (pthread_rwlock_t
> *rwlock, clockid_t clockid, if ((wf & PTHREAD_RWLOCK_FUTEX_USED) != 0)
>  				futex_wake
> (&rwlock->__data.__writers_futex, 1, private);
> -			      return ETIMEDOUT;
> +			      return err;
>  			    }
>  			  /* TODO Back-off.  */
>  			}
> diff --git a/nptl/sem_waitcommon.c b/nptl/sem_waitcommon.c
> index 6dd4eb97cb..0ac1f139bd 100644
> --- a/nptl/sem_waitcommon.c
> +++ b/nptl/sem_waitcommon.c
> @@ -191,7 +191,7 @@ __new_sem_wait_slow64 (struct new_sem *sem,
> clockid_t clockid, documentation.  Before Linux 2.6.22, EINTR was
> also returned on spurious wake-ups; we only support more recent Linux
> versions, so do not need to consider this here.)  */
> -	  if (err == ETIMEDOUT || err == EINTR)
> +	  if (err == ETIMEDOUT || err == EINTR || err == EOVERFLOW)
>  	    {
>  	      __set_errno (err);
>  	      err = -1;

Reviewed-by: Lukasz Majewski <lukma@denx.de>


Best regards,

Lukasz Majewski

--

DENX Software Engineering GmbH,      Managing Director: Wolfgang Denk
HRB 165235 Munich, Office: Kirchenstr.5, D-82194 Groebenzell, Germany
Phone: (+49)-8142-66989-59 Fax: (+49)-8142-66989-80 Email: lukma@denx.de
diff mbox series

Patch

diff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c
index 685dbca32f..02d11c61db 100644
--- a/nptl/pthread_cond_wait.c
+++ b/nptl/pthread_cond_wait.c
@@ -506,7 +506,7 @@  __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
 
 	  __pthread_cleanup_pop (&buffer, 0);
 
-	  if (__glibc_unlikely (err == ETIMEDOUT))
+	  if (__glibc_unlikely (err == ETIMEDOUT || err == EOVERFLOW))
 	    {
 	      __condvar_dec_grefs (cond, g, private);
 	      /* If we timed out, we effectively cancel waiting.  Note that
@@ -515,7 +515,7 @@  __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
 		 __condvar_quiesce_and_switch_g1 and us trying to acquire
 		 the lock during cancellation is not possible.  */
 	      __condvar_cancel_waiting (cond, seq, g, private);
-	      result = ETIMEDOUT;
+	      result = err;
 	      goto done;
 	    }
 	  else
diff --git a/nptl/pthread_mutex_timedlock.c b/nptl/pthread_mutex_timedlock.c
index 74adffe790..6c72a36b2b 100644
--- a/nptl/pthread_mutex_timedlock.c
+++ b/nptl/pthread_mutex_timedlock.c
@@ -270,7 +270,7 @@  __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
 	      oldval, clockid, abstime,
 	      PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
 	  /* The futex call timed out.  */
-	  if (err == ETIMEDOUT)
+	  if (err == ETIMEDOUT || err == EOVERFLOW)
 	    return err;
 	  /* Reload current lock value.  */
 	  oldval = mutex->__data.__lock;
@@ -550,8 +550,8 @@  __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
 		    int e = __futex_abstimed_wait64 (
 		      (unsigned int *) &mutex->__data.__lock, ceilval | 2,
 		      clockid, abstime, PTHREAD_MUTEX_PSHARED (mutex));
-		    if (e == ETIMEDOUT)
-		      return ETIMEDOUT;
+		    if (e == ETIMEDOUT || e == EOVERFLOW)
+		      return e;
 		  }
 	      }
 	    while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
diff --git a/nptl/pthread_rwlock_common.c b/nptl/pthread_rwlock_common.c
index 4c9f582d3d..9ef432c474 100644
--- a/nptl/pthread_rwlock_common.c
+++ b/nptl/pthread_rwlock_common.c
@@ -334,7 +334,7 @@  __pthread_rwlock_rdlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
 		                                     private);
 		  /* We ignore EAGAIN and EINTR.  On time-outs, we can just
 		     return because we don't need to clean up anything.  */
-		  if (err == ETIMEDOUT)
+		  if (err == ETIMEDOUT || err == EOVERFLOW)
 		    return err;
 		}
 	      /* It makes sense to not break out of the outer loop here
@@ -460,7 +460,7 @@  __pthread_rwlock_rdlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
 	  int err = __futex_abstimed_wait64 (&rwlock->__data.__wrphase_futex,
 					     1 | PTHREAD_RWLOCK_FUTEX_USED,
 					     clockid, abstime, private);
-	  if (err == ETIMEDOUT)
+	  if (err == ETIMEDOUT || err == EOVERFLOW)
 	    {
 	      /* If we timed out, we need to unregister.  If no read phase
 		 has been installed while we waited, we can just decrement
@@ -479,7 +479,7 @@  __pthread_rwlock_rdlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
 		  if (atomic_compare_exchange_weak_relaxed
 		      (&rwlock->__data.__readers, &r,
 		       r - (1 << PTHREAD_RWLOCK_READER_SHIFT)))
-		    return ETIMEDOUT;
+		    return err;
 		  /* TODO Back-off.  */
 		}
 	      /* Use the acquire MO fence to mirror the steps taken in the
@@ -730,7 +730,7 @@  __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
 	  int err = __futex_abstimed_wait64 (&rwlock->__data.__writers_futex,
 					     1 | PTHREAD_RWLOCK_FUTEX_USED,
 					     clockid, abstime, private);
-	  if (err == ETIMEDOUT)
+	  if (err == ETIMEDOUT || err == EOVERFLOW)
 	    {
 	      if (prefer_writer)
 		{
@@ -758,7 +758,7 @@  __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
 		}
 	      /* We cleaned up and cannot have stolen another waiting writer's
 		 futex wake-up, so just return.  */
-	      return ETIMEDOUT;
+	      return err;
 	    }
 	  /* If we got interrupted (EINTR) or the futex word does not have the
 	     expected value (EAGAIN), retry after reloading __readers.  */
@@ -829,7 +829,7 @@  __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
 	  int err = __futex_abstimed_wait64 (&rwlock->__data.__wrphase_futex,
 					     PTHREAD_RWLOCK_FUTEX_USED,
 					     clockid, abstime, private);
-	  if (err == ETIMEDOUT)
+	  if (err == ETIMEDOUT || err == EOVERFLOW)
 	    {
 	      if (rwlock->__data.__flags != PTHREAD_RWLOCK_PREFER_READER_NP)
 		{
@@ -861,7 +861,7 @@  __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
 			      if ((wf & PTHREAD_RWLOCK_FUTEX_USED) != 0)
 				futex_wake (&rwlock->__data.__writers_futex,
 					    1, private);
-			      return ETIMEDOUT;
+			      return err;
 			    }
 			  /* TODO Back-off.  */
 			}
diff --git a/nptl/sem_waitcommon.c b/nptl/sem_waitcommon.c
index 6dd4eb97cb..0ac1f139bd 100644
--- a/nptl/sem_waitcommon.c
+++ b/nptl/sem_waitcommon.c
@@ -191,7 +191,7 @@  __new_sem_wait_slow64 (struct new_sem *sem, clockid_t clockid,
 	     documentation.  Before Linux 2.6.22, EINTR was also returned on
 	     spurious wake-ups; we only support more recent Linux versions,
 	     so do not need to consider this here.)  */
-	  if (err == ETIMEDOUT || err == EINTR)
+	  if (err == ETIMEDOUT || err == EINTR || err == EOVERFLOW)
 	    {
 	      __set_errno (err);
 	      err = -1;