From patchwork Tue Jan 17 15:28:29 2017
X-Patchwork-Submitter: Stefan Liebler
X-Patchwork-Id: 18931
From: Stefan Liebler
Subject: Re: [PATCH 3/4] S390: Use new __libc_tbegin_retry macro in
 elision-lock.c.
To: libc-alpha@sourceware.org
References: <1481032315-12420-1-git-send-email-stli@linux.vnet.ibm.com>
 <1481032315-12420-3-git-send-email-stli@linux.vnet.ibm.com>
 <1484067206.5606.223.camel@redhat.com>
Date: Tue, 17 Jan 2017 16:28:29 +0100
In-Reply-To: <1484067206.5606.223.camel@redhat.com>
Message-Id: <720526fb-8fe5-f85f-e55f-89c58c4da4fc@linux.vnet.ibm.com>

On 01/10/2017 05:53 PM, Torvald Riegel wrote:
> On Tue, 2016-12-06 at 14:51 +0100, Stefan Liebler wrote:
>> This patch implements the __libc_tbegin_retry macro, which is equivalent
>> to the gcc builtin __builtin_tbegin_retry except for the changes that
>> were applied to __libc_tbegin in the previous patch.
>>
>> If tbegin aborts with _HTM_TBEGIN_TRANSIENT, then this macro restores
>> the fpc and fprs and automatically retries up to retry_cnt tbegins.
>> Further saving of the state is omitted, as it was already saved in the
>> first round.  Before retrying a further transaction, the
>> transaction-abort-assist instruction is used to support the cpu.
>
> This looks okay to me on the surface of it, but I don't know anything
> about s390 asm.
>
>> Use __libc_tbegin_retry macro.
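For readers who do not want to decode the inline assembly, the retry
behaviour described above corresponds roughly to the following C sketch.
This is a conceptual model only, built on the GCC s390 HTM builtins
(needs -mhtm and a CPU with the transactional-execution facility); the
real macro is hand-written assembly in htm.h and does its own fpc/fprs
save and restore.

#include <htmintrin.h>

static inline int
tbegin_retry_sketch (void *tdb, int retry_cnt)
{
  /* First attempt.  __builtin_tbegin returns one of the _HTM_TBEGIN_*
     condition codes from htmintrin.h.  */
  int cc = __builtin_tbegin (tdb);

  /* Only transient aborts (cc 2) are retried; persistent aborts
     (cc 1 or 3) are reported to the caller immediately.  */
  for (int aborts = 1;
       cc == _HTM_TBEGIN_TRANSIENT && aborts <= retry_cnt;
       aborts++)
    {
      /* Transaction-abort-assist (ppa): tell the CPU how often the
         transaction has aborted so far so that it can assist the retry.  */
      __builtin_tx_assist (aborts);
      cc = __builtin_tbegin (tdb);
    }

  /* tbegin is executed at most retry_cnt + 1 times.  */
  return cc;
}

In particular, retry_cnt counts retries, not attempts, which is why the
call site below passes aconf.try_tbegin - 1.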
>> ---
>>  sysdeps/unix/sysv/linux/s390/elision-lock.c | 50 ++++++++++++++---------------
>>  sysdeps/unix/sysv/linux/s390/htm.h          | 36 +++++++++++++++++++--
>>  2 files changed, 58 insertions(+), 28 deletions(-)
>>
>> diff --git a/sysdeps/unix/sysv/linux/s390/elision-lock.c b/sysdeps/unix/sysv/linux/s390/elision-lock.c
>> index 48cc3db..3dd7fbc 100644
>> --- a/sysdeps/unix/sysv/linux/s390/elision-lock.c
>> +++ b/sysdeps/unix/sysv/linux/s390/elision-lock.c
>> @@ -60,17 +60,16 @@ __lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
>>        goto use_lock;
>>      }
>>
>> -  int try_tbegin;
>> -  for (try_tbegin = aconf.try_tbegin;
>> -       try_tbegin > 0;
>> -       try_tbegin--)
>> +  if (aconf.try_tbegin > 0)
>>      {
>> -      int status;
>> -      if (__builtin_expect
>> -          ((status = __libc_tbegin ((void *) 0)) == _HTM_TBEGIN_STARTED, 1))
>> +      int status = __libc_tbegin_retry ((void *) 0, aconf.try_tbegin - 1);
>
> Maybe add a comment that reminds the reader that the macro expects a
> retry count, not how often a transaction should be tried.
>
okay

>> +      if (__builtin_expect (status == _HTM_TBEGIN_STARTED,
>> +                            _HTM_TBEGIN_STARTED))
>>          {
>> -          if (*futex == 0)
>> +          if (__builtin_expect (*futex == 0, 1))
>> +            /* Lock was free.  Return to user code in a transaction.  */
>>              return 0;
>> +
>>            /* Lock was busy.  Fall back to normal locking.  */
>>            if (__builtin_expect (__libc_tx_nesting_depth (), 1))
>>              {
>> @@ -81,7 +80,6 @@ __lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
>>                   See above for why relaxed MO is sufficient.  */
>>                if (aconf.skip_lock_busy > 0)
>>                  atomic_store_relaxed (adapt_count, aconf.skip_lock_busy);
>> -              goto use_lock;
>>              }
>>            else /* nesting depth is > 1 */
>>              {
>> @@ -99,28 +97,28 @@ __lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
>>                __libc_tabort (_HTM_FIRST_USER_ABORT_CODE | 1);
>>              }
>>          }
>> +      else if (status != _HTM_TBEGIN_TRANSIENT)
>> +        {
>> +          /* A persistent abort (cc 1 or 3) indicates that a retry is
>> +             probably futile.  Use the normal locking now and for the
>> +             next couple of calls.
>> +             Be careful to avoid writing to the lock.  See above for why
>> +             relaxed MO is sufficient.  */
>> +          if (aconf.skip_lock_internal_abort > 0)
>> +            atomic_store_relaxed (adapt_count,
>> +                                  aconf.skip_lock_internal_abort);
>> +        }
>>        else
>>          {
>> -          if (status != _HTM_TBEGIN_TRANSIENT)
>> -            {
>> -              /* A persistent abort (cc 1 or 3) indicates that a retry is
>> -                 probably futile.  Use the normal locking now and for the
>> -                 next couple of calls.
>> -                 Be careful to avoid writing to the lock.  See above for why
>> -                 relaxed MO is sufficient.  */
>> -              if (aconf.skip_lock_internal_abort > 0)
>> -                atomic_store_relaxed (adapt_count,
>> -                                      aconf.skip_lock_internal_abort);
>> -              goto use_lock;
>> -            }
>> +          /* Same logic as above, but for for a number of temporary failures in
>> +             a row.  */
>
> for for
>
This comment is rewritten in patch 4/4.  Thus it is not changed in the
attached diff.

>> +          if (aconf.skip_lock_out_of_tbegin_retries > 0)
>> +            atomic_store_relaxed (adapt_count,
>> +                                  aconf.skip_lock_out_of_tbegin_retries);
>>          }
>>      }
>>
>> -  /* Same logic as above, but for for a number of temporary failures in a
>> -     row.  See above for why relaxed MO is sufficient.  */
>> -  if (aconf.skip_lock_out_of_tbegin_retries > 0 && aconf.try_tbegin > 0)
>> -    atomic_store_relaxed (adapt_count, aconf.skip_lock_out_of_tbegin_retries);
>> -
>>  use_lock:
>> +  /* Use normal locking as fallback path if transaction does not succeed.  */
>
> the transaction
>
okay.  Changed in elision-trylock.c, too.
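To summarise the adaptation decisions from the hunks above in one place,
here is a stand-alone model.  It is a sketch only, not glibc code: the
struct mirrors the relevant aconf fields, the condition-code values mirror
the _HTM_TBEGIN_* constants from htmintrin.h, and skip_count is a
hypothetical helper (the nested-transaction case and the "> 0" guards of
the real code are left out for brevity).

#include <stdio.h>

/* Condition codes returned by tbegin on s390.  */
#define TBEGIN_STARTED   0   /* transaction is running */
#define TBEGIN_TRANSIENT 2   /* abort, a retry might succeed */

/* Mirrors the relevant fields of aconf (struct elision_config).  */
struct elision_config
{
  int skip_lock_busy;
  int skip_lock_internal_abort;
  int skip_lock_out_of_tbegin_retries;
};

/* For how many subsequent calls should elision be skipped after one
   __lll_lock_elision call with the given outcome?  */
static int
skip_count (int tbegin_status, int futex_was_free,
            const struct elision_config *cfg)
{
  if (tbegin_status == TBEGIN_STARTED)
    /* Transaction started: a free lock means elision succeeded; a busy
       lock means fall back and skip elision for a while.  */
    return futex_was_free ? 0 : cfg->skip_lock_busy;
  if (tbegin_status != TBEGIN_TRANSIENT)
    /* Persistent abort (cc 1 or 3): retrying is probably futile.  */
    return cfg->skip_lock_internal_abort;
  /* All automatic retries were used up by transient aborts.  */
  return cfg->skip_lock_out_of_tbegin_retries;
}

int
main (void)
{
  struct elision_config cfg = { 3, 3, 3 };   /* example values */
  printf ("started, lock free   -> skip %d calls\n", skip_count (0, 1, &cfg));
  printf ("started, lock busy   -> skip %d calls\n", skip_count (0, 0, &cfg));
  printf ("persistent abort     -> skip %d calls\n", skip_count (3, 0, &cfg));
  printf ("out of retries (cc2) -> skip %d calls\n", skip_count (2, 0, &cfg));
  return 0;
}

In all three non-zero cases the stored value ends up in adapt_count, which
__lll_lock_elision checks on entry before attempting another transaction.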
>>     return LLL_LOCK ((*futex), private);
>>  }
>
>

I've attached the diff here and will later make one patch with a changelog
for this and the other two patches.

diff --git a/sysdeps/unix/sysv/linux/s390/elision-lock.c b/sysdeps/unix/sysv/linux/s390/elision-lock.c
index a7c0fcc..faa998e 100644
--- a/sysdeps/unix/sysv/linux/s390/elision-lock.c
+++ b/sysdeps/unix/sysv/linux/s390/elision-lock.c
@@ -57,6 +57,9 @@ __lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
   if (atomic_load_relaxed (futex) == 0
       && atomic_load_relaxed (adapt_count) <= 0 && aconf.try_tbegin > 0)
     {
+      /* Start a transaction and retry it automatically if it aborts with
+         _HTM_TBEGIN_TRANSIENT.  This macro calls tbegin at most retry_cnt
+         + 1 times.  The second argument is considered as retry_cnt.  */
       int status = __libc_tbegin_retry ((void *) 0, aconf.try_tbegin - 1);
       if (__builtin_expect (status == _HTM_TBEGIN_STARTED,
                             _HTM_TBEGIN_STARTED))
@@ -118,6 +121,7 @@ __lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
         }
     }
 
-  /* Use normal locking as fallback path if transaction does not succeed.  */
+  /* Use normal locking as fallback path if the transaction does not
+     succeed.  */
   return LLL_LOCK ((*futex), private);
 }
diff --git a/sysdeps/unix/sysv/linux/s390/elision-trylock.c b/sysdeps/unix/sysv/linux/s390/elision-trylock.c
index 3c1d009..eec172a 100644
--- a/sysdeps/unix/sysv/linux/s390/elision-trylock.c
+++ b/sysdeps/unix/sysv/linux/s390/elision-trylock.c
@@ -93,6 +93,7 @@ __lll_trylock_elision (int *futex, short *adapt_count)
       /* Could do some retries here.  */
     }
 
-  /* Use normal locking as fallback path if transaction does not succeed.  */
+  /* Use normal locking as fallback path if the transaction does not
+     succeed.  */
   return lll_trylock (*futex);
 }
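One note on the retry-count arithmetic documented in the new comment:
aconf.try_tbegin configures the total number of tbegin attempts, while the
macro's second argument counts only the automatic retries, hence the - 1
at the call site.  A throwaway check of that arithmetic (not glibc code):

#include <stdio.h>

int
main (void)
{
  for (int try_tbegin = 1; try_tbegin <= 4; try_tbegin++)
    {
      int retry_cnt = try_tbegin - 1;   /* second argument of the macro */
      int max_tbegins = retry_cnt + 1;  /* tbegin executed at most this often */
      printf ("try_tbegin=%d -> retry_cnt=%d -> at most %d tbegin(s)\n",
              try_tbegin, retry_cnt, max_tbegins);
    }
  return 0;
}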