powerpc: Fix usage of elision transient failure adapt param

Message ID 55D625B9.9060700@linux.vnet.ibm.com
State Superseded

Commit Message

Paul E. Murphy Aug. 20, 2015, 7:08 p.m. UTC
  The skip_lock_out_of_tbegin_retries adaptive parameter was
not being used correctly, nor as described: it was applied on
every transient abort instead of only once the retry count was
exhausted.  This change prevents forcing a fallback to locking
for all users of the lock when a transient abort occurs within
the accepted number of retries.

2015-08-20  Paul E. Murphy  <murphyp@linux.vnet.ibm.com>
	* sysdeps/powerpc/nptl/elide.h (__elide_lock): Fix usage of
	.skip_lock_out_of_tbegin_retries.
	* sysdeps/unix/sysv/linux/powerpc/elision-lock.c (__lll_lock_elision):
	Likewise, and respect a value of try_tbegin <= 0.
---
 sysdeps/powerpc/nptl/elide.h                   |   10 +++++-----
 sysdeps/unix/sysv/linux/powerpc/elision-lock.c |   15 ++++++---------
 2 files changed, 11 insertions(+), 14 deletions(-)
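
To make the fix concrete, here is a minimal, compilable sketch of the
fixed control flow.  This is not the glibc code: try_transaction ()
stands in for __builtin_tbegin, lock_elided () for __lll_lock_elision,
and aconf is reduced to the two tunables involved.  The point is that
the out-of-retries penalty is applied once, after the retry loop, and
never on an individual transient abort.

#include <stdbool.h>
#include <stdio.h>

static struct
{
  int try_tbegin;
  int skip_lock_out_of_tbegin_retries;
} aconf = { 3, 3 };

/* Pretend every attempt aborts transiently.  */
static bool
try_transaction (void)
{
  return false;
}

static int
lock_elided (int *lock, short *adapt_count)
{
  if (*adapt_count > 0)
    {
      /* Still within the penalty window: skip elision entirely.  */
      (*adapt_count)--;
      goto use_lock;
    }

  for (int i = aconf.try_tbegin; i > 0; i--)
    if (try_transaction ())
      return 0;  /* Transaction started; the lock is elided.  */

  /* Only here, once all try_tbegin attempts have failed, is the lock
     penalized for subsequent callers.  */
  if (aconf.try_tbegin > 0 && aconf.skip_lock_out_of_tbegin_retries > 0)
    *adapt_count = aconf.skip_lock_out_of_tbegin_retries;

use_lock:
  *lock = 1;  /* Stand-in for LLL_LOCK.  */
  return 0;
}

int
main (void)
{
  int lock = 0;
  short adapt = 0;
  lock_elided (&lock, &adapt);
  printf ("adapt_count after exhausted retries: %d\n", adapt);  /* 3 */
  return 0;
}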
  

Comments

Adhemerval Zanella Netto Aug. 20, 2015, 7:17 p.m. UTC | #1
LGTM, thanks for checking on that.

On 20-08-2015 16:08, Paul E. Murphy wrote:
> The skip_lock_out_of_tbegin_retries adaptive parameter was
> not being used correctly, nor as described: it was applied on
> every transient abort instead of only once the retry count was
> exhausted.  This change prevents forcing a fallback to locking
> for all users of the lock when a transient abort occurs within
> the accepted number of retries.
  
Adhemerval Zanella Netto Aug. 20, 2015, 7:22 p.m. UTC | #2
However, this clashes with Tulio's update from his race condition fix [1].
You will need to coordinate with him to either drop this patch or
push his modification without this fix.

[1] https://sourceware.org/ml/libc-alpha/2015-08/msg00858.html

On 20-08-2015 16:17, Adhemerval Zanella wrote:
> LGTM, thanks for checking on that.
  

Patch

diff --git a/sysdeps/powerpc/nptl/elide.h b/sysdeps/powerpc/nptl/elide.h
index 389f5a5..38e1565 100644
--- a/sysdeps/powerpc/nptl/elide.h
+++ b/sysdeps/powerpc/nptl/elide.h
@@ -55,14 +55,14 @@  __elide_lock (uint8_t *adapt_count, int is_lock_free)
 		*adapt_count = __elision_aconf.skip_lock_internal_abort;
 	      break;
 	    }
-	  /* Same logic as above, but for a number of temporary failures in a
-	     a row.  */
-	  else if (__elision_aconf.skip_lock_out_of_tbegin_retries > 0
-		   && __elision_aconf.try_tbegin > 0)
-	    *adapt_count = __elision_aconf.skip_lock_out_of_tbegin_retries;
 	}
      }
 
+  /* Fall back to locks for a bit if retries have been exhausted.  */
+  if (__elision_aconf.skip_lock_out_of_tbegin_retries > 0
+      && __elision_aconf.try_tbegin > 0)
+    *adapt_count = __elision_aconf.skip_lock_out_of_tbegin_retries;
+
   return false;
 }
 
diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-lock.c b/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
index 7f9bcc2..c6731ca 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
@@ -72,8 +72,7 @@  __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
       goto use_lock;
     }
 
-  int try_begin = aconf.try_tbegin;
-  while (1)
+  for (int i = aconf.try_tbegin; i > 0; i--)
     {
       if (__builtin_tbegin (0))
 	{
@@ -87,21 +86,19 @@  __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
 	  /* A persistent failure indicates that a retry will probably
 	     result in another failure.  Use normal locking now and
 	     for the next couple of calls.  */
-	  if (try_begin-- <= 0
-	      || _TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
+	  if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
 	    {
 	      if (aconf.skip_lock_internal_abort > 0)
 		*adapt_count = aconf.skip_lock_internal_abort;
 	      goto use_lock;
 	    }
-	  /* Same logic as above, but for for a number of temporary failures
-	     in a row.  */
-	  else if (aconf.skip_lock_out_of_tbegin_retries > 0
-                   && aconf.try_tbegin > 0)
-	    *adapt_count = aconf.skip_lock_out_of_tbegin_retries;
 	}
      }
 
+  /* Fall back to locks for a bit if retries have been exhausted.  */
+  if (aconf.try_tbegin > 0 && aconf.skip_lock_out_of_tbegin_retries > 0)
+    *adapt_count = aconf.skip_lock_out_of_tbegin_retries;
+
 use_lock:
   return LLL_LOCK ((*lock), pshared);
 }
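
For contrast, a compilable simulation of the old, buggy placement,
again with hypothetical stand-ins rather than the glibc sources:
because the penalty fired inside the loop on every transient abort, a
single noisy abort disabled elision for the next
skip_lock_out_of_tbegin_retries callers even when a retry then
succeeded.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static struct
{
  int try_tbegin;
  int skip_lock_out_of_tbegin_retries;
} aconf = { 3, 3 };

/* Pretend the first attempt aborts transiently and the retry
   succeeds.  */
static int attempt;

static bool
try_transaction (void)
{
  return ++attempt >= 2;
}

int
main (void)
{
  uint8_t adapt_count = 0;

  for (int i = aconf.try_tbegin; i > 0; i--)
    {
      if (try_transaction ())
        break;  /* Elision succeeded on the retry.  */
      /* Old placement: this ran on the very first transient abort...  */
      if (aconf.skip_lock_out_of_tbegin_retries > 0
          && aconf.try_tbegin > 0)
        adapt_count = aconf.skip_lock_out_of_tbegin_retries;
    }

  /* ...so although the retry succeeded, the following callers skip
     elision and take the lock directly.  */
  printf ("adapt_count despite a successful retry: %d\n", adapt_count);
  return 0;
}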