[v1] nptl: Replace CAS with cheaper atomics for pthread_cancel logic
Checks
Context |
Check |
Description |
dj/TryBot-32bit |
success
|
Build for i686
|
dj/TryBot-apply_patch |
success
|
Patch applied to master at the time it was sent
|
Commit Message
The logic for setting/unsetting/testing CANCELTYPE_BITMASK can often
be done with `atomic_fetch_{or|and}` instead of a CAS.
On some architectures (x86 for example) this can be optimized to more
efficient instructions (`bt{s|r}`) instead of a CAS loop. Otherwise
compilers will just optimize it to the CAS equivalent.
Full check passed on x86-64 linux.
---
The replacements of CAS with atomic_fetch_and in:
- ___pthread_register_cancel_defer
- __libc_cleanup_push_defer
is logically equivalent AFAICT.
The replacement of CAS with atomic_fetch_or in:
- ___pthread_unregister_cancel_restore
- __libc_cleanup_pop_restore
is not logically equivalent and may have an issue.
In `___pthread_unregister_cancel_restore` and
`__libc_cleanup_pop_restore` since the CAS loop would reload
`cancelhandling` on failed CAS there may now be a race condition on
the later check of `cancel_enabled_and_canceled`.
nptl/cleanup_defer.c | 28 ++++++++--------------------
nptl/libc-cleanup.c | 28 ++++++++--------------------
2 files changed, 16 insertions(+), 40 deletions(-)
@@ -33,19 +33,14 @@ ___pthread_register_cancel_defer (__pthread_unwind_buf_t *buf)
int cancelhandling = atomic_load_relaxed (&self->cancelhandling);
if (__glibc_unlikely (cancelhandling & CANCELTYPE_BITMASK))
{
- int newval;
- do
- {
- newval = cancelhandling & ~CANCELTYPE_BITMASK;
- }
- while (!atomic_compare_exchange_weak_acquire (&self->cancelhandling,
- &cancelhandling,
- newval));
+ if (atomic_fetch_and_acquire (&self->cancelhandling, ~CANCELTYPE_BITMASK)
+ & CANCELTYPE_BITMASK)
+ ibuf->priv.data.canceltype = PTHREAD_CANCEL_ASYNCHRONOUS;
+ else
+ ibuf->priv.data.canceltype = PTHREAD_CANCEL_DEFERRED;
}
-
- ibuf->priv.data.canceltype = (cancelhandling & CANCELTYPE_BITMASK
- ? PTHREAD_CANCEL_ASYNCHRONOUS
- : PTHREAD_CANCEL_DEFERRED);
+ else
+ ibuf->priv.data.canceltype = PTHREAD_CANCEL_DEFERRED;
/* Store the new cleanup handler info. */
THREAD_SETMEM (self, cleanup_jmp_buf, (struct pthread_unwind_buf *) buf);
@@ -73,14 +68,7 @@ ___pthread_unregister_cancel_restore (__pthread_unwind_buf_t *buf)
int cancelhandling = atomic_load_relaxed (&self->cancelhandling);
if ((cancelhandling & CANCELTYPE_BITMASK) == 0)
{
- int newval;
- do
- {
- newval = cancelhandling | CANCELTYPE_BITMASK;
- }
- while (!atomic_compare_exchange_weak_acquire (&self->cancelhandling,
- &cancelhandling, newval));
-
+ atomic_fetch_or_acquire (&self->cancelhandling, CANCELTYPE_BITMASK);
if (cancel_enabled_and_canceled (cancelhandling))
{
self->result = PTHREAD_CANCELED;
@@ -31,19 +31,14 @@ __libc_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer)
/* Disable asynchronous cancellation for now. */
if (__glibc_unlikely (cancelhandling & CANCELTYPE_BITMASK))
{
- int newval;
- do
- {
- newval = cancelhandling & ~CANCELTYPE_BITMASK;
- }
- while (!atomic_compare_exchange_weak_acquire (&self->cancelhandling,
- &cancelhandling,
- newval));
+ if (atomic_fetch_and_acquire (&self->cancelhandling, ~CANCELTYPE_BITMASK)
+ & CANCELTYPE_BITMASK)
+ buffer->__canceltype = PTHREAD_CANCEL_ASYNCHRONOUS;
+ else
+ buffer->__canceltype = PTHREAD_CANCEL_DEFERRED;
}
-
- buffer->__canceltype = (cancelhandling & CANCELTYPE_BITMASK
- ? PTHREAD_CANCEL_ASYNCHRONOUS
- : PTHREAD_CANCEL_DEFERRED);
+ else
+ buffer->__canceltype = PTHREAD_CANCEL_DEFERRED;
THREAD_SETMEM (self, cleanup, buffer);
}
@@ -60,14 +55,7 @@ __libc_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer)
if (buffer->__canceltype != PTHREAD_CANCEL_DEFERRED
&& (cancelhandling & CANCELTYPE_BITMASK) == 0)
{
- int newval;
- do
- {
- newval = cancelhandling | CANCELTYPE_BITMASK;
- }
- while (!atomic_compare_exchange_weak_acquire (&self->cancelhandling,
- &cancelhandling, newval));
-
+ atomic_fetch_or_acquire (&self->cancelhandling, CANCELTYPE_BITMASK);
if (cancel_enabled_and_canceled (cancelhandling))
{
self->result = PTHREAD_CANCELED;