[02/15] nptl: Handle robust PI mutexes for !__ASSUME_SET_ROBUST_LIST (BZ #28268)
Checks
Context               | Check   | Description
dj/TryBot-apply_patch | success | Patch applied to master at the time it was sent
Commit Message
Robust PI mutexes are signaled by setting the LSB to 1, so the code
needs to take this into account before accessing the
__pthread_mutex_s.
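For illustration, a minimal sketch of the tag handling (the helper and
struct names are hypothetical, not part of the patch): the entry
pointer has to be masked before it is dereferenced, and the saved bit
tells the exit path whether the PI unlock protocol applies.

    /* Hypothetical helper, for illustration only: decode a robust-list
       entry whose least significant bit may carry the PI tag.  */
    #include <stdbool.h>
    #include <stdint.h>

    struct entry_desc
    {
      void **next;   /* Usable pointer, tag bit cleared.  */
      bool is_pi;    /* True if the entry refers to a PI mutex.  */
    };

    static inline struct entry_desc
    decode_robust_entry (void *entry)
    {
      return (struct entry_desc) {
        .next = (void **) ((uintptr_t) entry & ~(uintptr_t) 1),
        .is_pi = ((uintptr_t) entry & 1) != 0,
      };
    }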
The code is also simplified: the initialization code is not really
required, since PD->robust_head.list and PD->robust_list.__next are
essentially the same regardless of __PTHREAD_MUTEX_HAVE_PREV; the futex
wake is issued only when required; and the futex shared bit is set only
when required.
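A rough model of why a single access suffices (layouts simplified, not
the exact nptl/descr.h declarations): in both configurations the first
word of the per-thread robust list state is the head pointer, so
PD->robust_head.list and PD->robust_list.__next read the same memory.

    /* Simplified model of the two layouts; not the exact nptl/descr.h
       declarations.  Both place the "next"/"list" pointer first, so a
       single access through robust_head.list covers both cases.  */
    #include <assert.h>
    #include <stddef.h>

    struct robust_list_head
    {
      void *list;
      long futex_offset;
      void *list_op_pending;
    };

    typedef struct { void *__next; } __pthread_slist_t;

    union robust_lists
    {
      struct robust_list_head robust_head; /* __PTHREAD_MUTEX_HAVE_PREV.  */
      __pthread_slist_t robust_list;       /* Otherwise.  */
    };

    int
    main (void)
    {
      assert (offsetof (union robust_lists, robust_head.list)
              == offsetof (union robust_lists, robust_list.__next));
      return 0;
    }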
Checked on a build for m68k-linux-gnu. I also checked on
x86_64-linux-gnu by removing the check for !__ASSUME_SET_ROBUST_LIST.
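For reference, this is my reading of "removing the check" (a local,
test-only change, not part of the patch): turning the guard into an
unconditional one so the fallback walk also compiles on x86_64, e.g.:

    -#ifndef __ASSUME_SET_ROBUST_LIST
    +#if 1 /* Test only: force-compile the fallback path.  */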
---
nptl/pthread_create.c | 52 +++++++++++++++++++++----------------------
1 file changed, 26 insertions(+), 26 deletions(-)
@@ -507,35 +507,35 @@ start_thread (void *arg)
__libc_lock_unlock (pd->exit_lock);
#ifndef __ASSUME_SET_ROBUST_LIST
- /* If this thread has any robust mutexes locked, handle them now. */
-# if __PTHREAD_MUTEX_HAVE_PREV
- void *robust = pd->robust_head.list;
-# else
- __pthread_slist_t *robust = pd->robust_list.__next;
-# endif
- /* We let the kernel do the notification if it is able to do so.
- If we have to do it here there for sure are no PI mutexes involved
- since the kernel support for them is even more recent. */
- if (!__nptl_set_robust_list_avail
- && __builtin_expect (robust != (void *) &pd->robust_head, 0))
+ /* We let the kernel do the notification if it is able to do so on the exit
+ syscall. Otherwise we need to handle it before the thread terminates. */
+ void **robust;
+ while ((robust = pd->robust_head.list)
+ && robust != (void *) &pd->robust_head)
{
- do
+ /* Note: robust PI futexes are signaled by setting bit 0. */
+ void **robustp = (void **) ((uintptr_t) robust & ~1UL);
+
+ struct __pthread_mutex_s *mtx = (struct __pthread_mutex_s *)
+ ((char *) robustp - offsetof (struct __pthread_mutex_s,
+ __list.__next));
+ int shared = mtx->__kind & 128;
+
+ pd->robust_head.list_op_pending = robust;
+ pd->robust_head.list = *robustp;
+ /* Although the list will not be concurrently modified at this point,
+ the store ordering follows the expected kernel ABI. */
+ __asm ("" ::: "memory");
+
+ int lock = atomic_exchange_relaxed (&mtx->__lock, FUTEX_OWNER_DIED);
+ /* Wake any waiters if the mutex was still acquired. */
+ if (lock > 1)
{
- struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
- ((char *) robust - offsetof (struct __pthread_mutex_s,
- __list.__next));
- robust = *((void **) robust);
-
-# if __PTHREAD_MUTEX_HAVE_PREV
- this->__list.__prev = NULL;
-# endif
- this->__list.__next = NULL;
-
- atomic_or (&this->__lock, FUTEX_OWNER_DIED);
- futex_wake ((unsigned int *) &this->__lock, 1,
- /* XYZ */ FUTEX_SHARED);
+ if ((uintptr_t) robust & 1)
+ futex_unlock_pi ((unsigned int *) &mtx->__lock, shared);
+ else
+ futex_wake ((unsigned int *) &mtx->__lock, 1, shared);
}
- while (robust != (void *) &pd->robust_head);
}
#endif
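For context, a standalone sketch (my own example, not from the patch or
the glibc test suite) of the behavior this path has to preserve: a
thread that exits while holding a robust PI mutex must leave it
acquirable, with the next locker seeing EOWNERDEAD rather than hanging.

    /* Standalone illustration: recover a robust PI mutex after its
       owner thread exits without unlocking.  Build with -pthread.  */
    #include <assert.h>
    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t m;

    static void *
    die_holding_lock (void *arg)
    {
      pthread_mutex_lock (&m);
      return NULL;   /* Exit while still owning the mutex.  */
    }

    int
    main (void)
    {
      pthread_mutexattr_t a;
      pthread_mutexattr_init (&a);
      pthread_mutexattr_setrobust (&a, PTHREAD_MUTEX_ROBUST);
      pthread_mutexattr_setprotocol (&a, PTHREAD_PRIO_INHERIT);
      pthread_mutex_init (&m, &a);

      pthread_t t;
      pthread_create (&t, NULL, die_holding_lock, NULL);
      pthread_join (t, NULL);

      /* The dead owner must be reported instead of a hang.  */
      int r = pthread_mutex_lock (&m);
      assert (r == EOWNERDEAD);
      pthread_mutex_consistent (&m);
      pthread_mutex_unlock (&m);
      puts ("robust PI mutex recovered after owner death");
      return 0;
    }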