[3/4] S390: Use new __libc_tbegin_retry macro in elision-lock.c.
Commit Message
On 01/10/2017 05:53 PM, Torvald Riegel wrote:
> On Tue, 2016-12-06 at 14:51 +0100, Stefan Liebler wrote:
>> This patch implements __libc_tbegin_retry macro which is equivalent to
>> gcc builtin __builtin_tbegin_retry, except the changes which were applied
>> to __libc_tbegin in the previous patch.
>>
>> If tbegin aborts with _HTM_TBEGIN_TRANSIENT, then this macro restores
>> the fpc, fprs and automatically retries up to retry_cnt tbegins.
>> Further saving of the state is omitted as it is already saved in the
>> first round. Before retrying a further transaction, the
>> transaction-abort-assist instruction is used to support the cpu.
>
> This looks okay to me on the surface of it, but I don't know anything
> about s390 asm.
>
>> Use __libc_tbegin_retry macro.
>> ---
>> sysdeps/unix/sysv/linux/s390/elision-lock.c | 50 ++++++++++++++---------------
>> sysdeps/unix/sysv/linux/s390/htm.h | 36 +++++++++++++++++++--
>> 2 files changed, 58 insertions(+), 28 deletions(-)
>>
>> diff --git a/sysdeps/unix/sysv/linux/s390/elision-lock.c b/sysdeps/unix/sysv/linux/s390/elision-lock.c
>> index 48cc3db..3dd7fbc 100644
>> --- a/sysdeps/unix/sysv/linux/s390/elision-lock.c
>> +++ b/sysdeps/unix/sysv/linux/s390/elision-lock.c
>> @@ -60,17 +60,16 @@ __lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
>> goto use_lock;
>> }
>>
>> - int try_tbegin;
>> - for (try_tbegin = aconf.try_tbegin;
>> - try_tbegin > 0;
>> - try_tbegin--)
>> + if (aconf.try_tbegin > 0)
>> {
>> - int status;
>> - if (__builtin_expect
>> - ((status = __libc_tbegin ((void *) 0)) == _HTM_TBEGIN_STARTED, 1))
>> + int status = __libc_tbegin_retry ((void *) 0, aconf.try_tbegin - 1);
>
> Maybe add a comment that the macro that reminds the reader that the
> macro expects a retry count, not how often a transaction should be
> tried.
>
okay
>> + if (__builtin_expect (status == _HTM_TBEGIN_STARTED,
>> + _HTM_TBEGIN_STARTED))
>> {
>> - if (*futex == 0)
>> + if (__builtin_expect (*futex == 0, 1))
>> + /* Lock was free. Return to user code in a transaction. */
>> return 0;
>> +
>> /* Lock was busy. Fall back to normal locking. */
>> if (__builtin_expect (__libc_tx_nesting_depth (), 1))
>> {
>> @@ -81,7 +80,6 @@ __lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
>> See above for why relaxed MO is sufficient. */
>> if (aconf.skip_lock_busy > 0)
>> atomic_store_relaxed (adapt_count, aconf.skip_lock_busy);
>> - goto use_lock;
>> }
>> else /* nesting depth is > 1 */
>> {
>> @@ -99,28 +97,28 @@ __lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
>> __libc_tabort (_HTM_FIRST_USER_ABORT_CODE | 1);
>> }
>> }
>> + else if (status != _HTM_TBEGIN_TRANSIENT)
>> + {
>> + /* A persistent abort (cc 1 or 3) indicates that a retry is
>> + probably futile. Use the normal locking now and for the
>> + next couple of calls.
>> + Be careful to avoid writing to the lock. See above for why
>> + relaxed MO is sufficient. */
>> + if (aconf.skip_lock_internal_abort > 0)
>> + atomic_store_relaxed (adapt_count,
>> + aconf.skip_lock_internal_abort);
>> + }
>> else
>> {
>> - if (status != _HTM_TBEGIN_TRANSIENT)
>> - {
>> - /* A persistent abort (cc 1 or 3) indicates that a retry is
>> - probably futile. Use the normal locking now and for the
>> - next couple of calls.
>> - Be careful to avoid writing to the lock. See above for why
>> - relaxed MO is sufficient. */
>> - if (aconf.skip_lock_internal_abort > 0)
>> - atomic_store_relaxed (adapt_count,
>> - aconf.skip_lock_internal_abort);
>> - goto use_lock;
>> - }
>> + /* Same logic as above, but for for a number of temporary failures in
>> + a row. */
>
> for for
>
This comment is rewritten in patch 4/4. Thus it is not changed in the
attached diff.
>> + if (aconf.skip_lock_out_of_tbegin_retries > 0)
>> + atomic_store_relaxed (adapt_count,
>> + aconf.skip_lock_out_of_tbegin_retries);
>> }
>> }
>>
>> - /* Same logic as above, but for for a number of temporary failures in a
>> - row. See above for why relaxed MO is sufficient. */
>> - if (aconf.skip_lock_out_of_tbegin_retries > 0 && aconf.try_tbegin > 0)
>> - atomic_store_relaxed (adapt_count, aconf.skip_lock_out_of_tbegin_retries);
>> -
>> use_lock:
>> + /* Use normal locking as fallback path if transaction does not succeed. */
>
> the transaction
>
okay. Changed in elision-trylock.c, too.
>> return LLL_LOCK ((*futex), private);
>> }
>
>
I've attached the diff here and will later make one patch with changelog
for this and the other two patches.
Comments
On Tue, 2017-01-17 at 16:28 +0100, Stefan Liebler wrote:
> I've attached the diff here and will later make one patch with changelog
> for this and the other two patches.
This diff is OK.
@@ -57,6 +57,9 @@ __lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
if (atomic_load_relaxed (futex) == 0
&& atomic_load_relaxed (adapt_count) <= 0 && aconf.try_tbegin > 0)
{
+ /* Start a transaction and retry it automatically if it aborts with
+ _HTM_TBEGIN_TRANSIENT. This macro calls tbegin at most retry_cnt
+ + 1 times. The second argument is considered as retry_cnt. */
int status = __libc_tbegin_retry ((void *) 0, aconf.try_tbegin - 1);
if (__builtin_expect (status == _HTM_TBEGIN_STARTED,
_HTM_TBEGIN_STARTED))
@@ -118,6 +121,7 @@ __lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
}
}
- /* Use normal locking as fallback path if transaction does not succeed. */
+ /* Use normal locking as fallback path if the transaction does not
+ succeed. */
return LLL_LOCK ((*futex), private);
}
@@ -93,6 +93,7 @@ __lll_trylock_elision (int *futex, short *adapt_count)
/* Could do some retries here. */
}
- /* Use normal locking as fallback path if transaction does not succeed. */
+ /* Use normal locking as fallback path if the transaction does not
+ succeed. */
return lll_trylock (*futex);
}