[20/26] Linux: set_robust_list syscall number is always available
Commit Message
Due to the built-in tables, __NR_set_robust_list is always defined
(although it may not be available at run time).
---
nptl/nptl-init.c | 4 ----
nptl/pthread_create.c | 6 ++----
sysdeps/nptl/fork.c | 10 ++++------
3 files changed, 6 insertions(+), 14 deletions(-)
Comments
On 09/02/2020 17:21, Florian Weimer wrote:
> Due to the built-in tables, __NR_set_robust_list is always defined
> (although it may not be available at run time).
LGTM, thanks.
Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
> ---
> nptl/nptl-init.c | 4 ----
> nptl/pthread_create.c | 6 ++----
> sysdeps/nptl/fork.c | 10 ++++------
> 3 files changed, 6 insertions(+), 14 deletions(-)
>
> diff --git a/nptl/nptl-init.c b/nptl/nptl-init.c
> index 1877248014..373be89c95 100644
> --- a/nptl/nptl-init.c
> +++ b/nptl/nptl-init.c
> @@ -129,11 +129,9 @@ static
> void
> __nptl_set_robust (struct pthread *self)
> {
> -#ifdef __NR_set_robust_list
> INTERNAL_SYSCALL_DECL (err);
> INTERNAL_SYSCALL (set_robust_list, err, 2, &self->robust_head,
> sizeof (struct robust_list_head));
> -#endif
> }
>
>
> @@ -254,7 +252,6 @@ __pthread_initialize_minimal_internal (void)
> pd->robust_prev = &pd->robust_head;
> #endif
> pd->robust_head.list = &pd->robust_head;
> -#ifdef __NR_set_robust_list
> pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
> - offsetof (pthread_mutex_t,
> __data.__list.__next));
> @@ -262,7 +259,6 @@ __pthread_initialize_minimal_internal (void)
> int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
> sizeof (struct robust_list_head));
> if (INTERNAL_SYSCALL_ERROR_P (res, err))
> -#endif
> set_robust_list_not_avail ();
> }
>
I wonder if it would be better to refactor __nptl_set_robust to
simplify the '__set_robust_list_avail' handling. Something like:
--
void
__nptl_set_robust (struct pthread *pd)
{
#ifdef __ASSUME_SET_ROBUST_LIST
static int set_robust_supported = true;
if (atomic_load_relaxed (&set_robust_supported))
{
int res = INTERNAL_SYSCALL_CALL (set_robust_list, &pd->robust_head,
sizeof (struct robust_list_head));
if (INTERNAL_SYSCALL_ERROR_P (res))
atomic_store_relaxed (&set_robust_supported, false);
}
#endif
}
--
> diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c
> index d3fd58730c..58706b4160 100644
> --- a/nptl/pthread_create.c
> +++ b/nptl/pthread_create.c
> @@ -389,10 +389,9 @@ START_THREAD_DEFN
> if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2))
> futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);
>
> -#ifdef __NR_set_robust_list
> -# ifndef __ASSUME_SET_ROBUST_LIST
> +#ifndef __ASSUME_SET_ROBUST_LIST
> if (__set_robust_list_avail >= 0)
> -# endif
> +#endif
> {
> INTERNAL_SYSCALL_DECL (err);
> /* This call should never fail because the initial call in init.c
> @@ -400,7 +399,6 @@ START_THREAD_DEFN
> INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
> sizeof (struct robust_list_head));
> }
> -#endif
>
> /* If the parent was running cancellation handlers while creating
> the thread the new thread inherited the signal mask. Reset the
Ok.
> diff --git a/sysdeps/nptl/fork.c b/sysdeps/nptl/fork.c
> index f5cf88d68c..5091a000e3 100644
> --- a/sysdeps/nptl/fork.c
> +++ b/sysdeps/nptl/fork.c
> @@ -83,7 +83,6 @@ __libc_fork (void)
> if (__fork_generation_pointer != NULL)
> *__fork_generation_pointer += __PTHREAD_ONCE_FORK_GEN_INCR;
>
> -#ifdef __NR_set_robust_list
> /* Initialize the robust mutex list setting in the kernel which has
> been reset during the fork. We do not check for errors because if
> it fails here, it must have failed at process startup as well and
> @@ -94,19 +93,18 @@ __libc_fork (void)
> inherit the correct value from the parent. We do not need to clear
> the pending operation because it must have been zero when fork was
> called. */
> -# if __PTHREAD_MUTEX_HAVE_PREV
> +#if __PTHREAD_MUTEX_HAVE_PREV
> self->robust_prev = &self->robust_head;
> -# endif
> +#endif
> self->robust_head.list = &self->robust_head;
> -# ifdef SHARED
> +#ifdef SHARED
> if (__builtin_expect (__libc_pthread_functions_init, 0))
> PTHFCT_CALL (ptr_set_robust, (self));
> -# else
> +#else
> extern __typeof (__nptl_set_robust) __nptl_set_robust
> __attribute__((weak));
> if (__builtin_expect (__nptl_set_robust != NULL, 0))
> __nptl_set_robust (self);
> -# endif
> #endif
>
> /* Reset the lock state in the multi-threaded case. */
>
Ok.
@@ -129,11 +129,9 @@ static
void
__nptl_set_robust (struct pthread *self)
{
-#ifdef __NR_set_robust_list
INTERNAL_SYSCALL_DECL (err);
INTERNAL_SYSCALL (set_robust_list, err, 2, &self->robust_head,
sizeof (struct robust_list_head));
-#endif
}
@@ -254,7 +252,6 @@ __pthread_initialize_minimal_internal (void)
pd->robust_prev = &pd->robust_head;
#endif
pd->robust_head.list = &pd->robust_head;
-#ifdef __NR_set_robust_list
pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
- offsetof (pthread_mutex_t,
__data.__list.__next));
@@ -262,7 +259,6 @@ __pthread_initialize_minimal_internal (void)
int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
sizeof (struct robust_list_head));
if (INTERNAL_SYSCALL_ERROR_P (res, err))
-#endif
set_robust_list_not_avail ();
}
@@ -389,10 +389,9 @@ START_THREAD_DEFN
if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2))
futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);
-#ifdef __NR_set_robust_list
-# ifndef __ASSUME_SET_ROBUST_LIST
+#ifndef __ASSUME_SET_ROBUST_LIST
if (__set_robust_list_avail >= 0)
-# endif
+#endif
{
INTERNAL_SYSCALL_DECL (err);
/* This call should never fail because the initial call in init.c
@@ -400,7 +399,6 @@ START_THREAD_DEFN
INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
sizeof (struct robust_list_head));
}
-#endif
/* If the parent was running cancellation handlers while creating
the thread the new thread inherited the signal mask. Reset the
@@ -83,7 +83,6 @@ __libc_fork (void)
if (__fork_generation_pointer != NULL)
*__fork_generation_pointer += __PTHREAD_ONCE_FORK_GEN_INCR;
-#ifdef __NR_set_robust_list
/* Initialize the robust mutex list setting in the kernel which has
been reset during the fork. We do not check for errors because if
it fails here, it must have failed at process startup as well and
@@ -94,19 +93,18 @@ __libc_fork (void)
inherit the correct value from the parent. We do not need to clear
the pending operation because it must have been zero when fork was
called. */
-# if __PTHREAD_MUTEX_HAVE_PREV
+#if __PTHREAD_MUTEX_HAVE_PREV
self->robust_prev = &self->robust_head;
-# endif
+#endif
self->robust_head.list = &self->robust_head;
-# ifdef SHARED
+#ifdef SHARED
if (__builtin_expect (__libc_pthread_functions_init, 0))
PTHFCT_CALL (ptr_set_robust, (self));
-# else
+#else
extern __typeof (__nptl_set_robust) __nptl_set_robust
__attribute__((weak));
if (__builtin_expect (__nptl_set_robust != NULL, 0))
__nptl_set_robust (self);
-# endif
#endif
/* Reset the lock state in the multi-threaded case. */