[01/12] linux: Fix vDSO macros build with time64 interfaces
Commit Message
As indicated on libc-help [1] the ec138c67cb commit broke 32-bit
builds when configured with --enable-kernel=5.1 or higher. The
scenario 10 from [2] might also occur in this configuration and
INLINE_VSYSCALL will try to use the vDSO symbol and
HAVE_CLOCK_GETTIME64_VSYSCALL does not set HAVE_VSYSCALL prior to its
usage.
Also, there is no easy way to just enable the code to use one
vDSO symbol, since the macro INLINE_VSYSCALL is redefined if
HAVE_VSYSCALL is set.
Instead of adding more pre-processor handling and making the code
even more convoluted, this patch removes the requirement of defining
HAVE_VSYSCALL before including sysdep-vdso.h to enable vDSO usage.
The INLINE_VSYSCALL now only tries to call the vDSO symbol through
the function pointer, the fallback code to try the syscall is done
explicitly. The internal INLINE_VSYSCALL_CALL semantics are also
changed to return a negative errno value in case of an error, which
allows simplifying the INLINE_VSYSCALL implementation.
The macro is now expected to be called inside the HAVE_*_VSYSCALL
guard and it does not fall back to calling the syscall (this should be
done explicitly). Also, calling the INLINE_VSYSCALL macro with a vDSO
name not defined by the architecture will trigger a build error.
Internally it is also simplified to expect a negative value with the
errno embedded, similar to how Linux syscalls work. It simplifies
error handling.
Both clock_getres and clock_gettime vDSO code for time64_t were
removed since there is no vDSO setup code for the symbol (currently
an architecture can not set HAVE_CLOCK_GETTIME64_VSYSCALL).
Checked on i686-linux-gnu (default and with --enable-kernel=5.1),
x86_64-linux-gnu, aarch64-linux-gnu, and powerpc64le-linux-gnu.
I also checked against a build to mips64-linux-gnu and
sparc64-linux-gnu.
[1] https://sourceware.org/ml/libc-help/2019-12/msg00014.html
---
.../unix/sysv/linux/aarch64/gettimeofday.c | 8 +-
sysdeps/unix/sysv/linux/clock_getres.c | 43 ++++++----
sysdeps/unix/sysv/linux/clock_gettime.c | 45 +++++++----
sysdeps/unix/sysv/linux/getcpu.c | 13 +--
sysdeps/unix/sysv/linux/mips/sysdep.h | 16 ----
.../sysv/linux/powerpc/get_timebase_freq.c | 3 +-
.../unix/sysv/linux/powerpc/gettimeofday.c | 8 +-
.../sysv/linux/powerpc/powerpc32/sysdep.h | 10 ++-
.../sysv/linux/powerpc/powerpc64/sysdep.h | 10 ++-
sysdeps/unix/sysv/linux/powerpc/time.c | 9 +--
sysdeps/unix/sysv/linux/sched_getcpu.c | 17 ++--
sysdeps/unix/sysv/linux/sparc/sysdep.h | 7 --
sysdeps/unix/sysv/linux/sysdep-vdso.h | 79 +++++--------------
sysdeps/unix/sysv/linux/x86/gettimeofday.c | 8 +-
sysdeps/unix/sysv/linux/x86/time.c | 11 +--
15 files changed, 117 insertions(+), 170 deletions(-)
Comments
* Adhemerval Zanella:
> diff --git a/sysdeps/unix/sysv/linux/aarch64/gettimeofday.c b/sysdeps/unix/sysv/linux/aarch64/gettimeofday.c
> index 7e772e05ce..07d38466e2 100644
> --- a/sysdeps/unix/sysv/linux/aarch64/gettimeofday.c
> +++ b/sysdeps/unix/sysv/linux/aarch64/gettimeofday.c
> @@ -22,10 +22,6 @@
>
> #include <time.h>
> #include <sysdep.h>
> -
> -#ifdef HAVE_GETTIMEOFDAY_VSYSCALL
> -# define HAVE_VSYSCALL
> -#endif
> #include <sysdep-vdso.h>
>
> /* Used as a fallback in the ifunc resolver if VDSO is not available
> @@ -36,7 +32,9 @@ __gettimeofday_vsyscall (struct timeval *restrict tv, void *restrict tz)
> if (__glibc_unlikely (tz != 0))
> memset (tz, 0, sizeof *tz);
>
> - return INLINE_VSYSCALL (gettimeofday, 2, tv, tz);
> + if (INLINE_VSYSCALL (gettimeofday, 2, tv, tz) == 0)
> + return 0;
> + return INLINE_SYSCALL_CALL (gettimeofday, tv, tz);
> }
Given that this is the fallback function why do we try INLINE_VSYSCALL
first?
(The static case would need adjusting, of course.)
> diff --git a/sysdeps/unix/sysv/linux/clock_gettime.c b/sysdeps/unix/sysv/linux/clock_gettime.c
> index 875c4fe905..4ea56c9a4b 100644
> --- a/sysdeps/unix/sysv/linux/clock_gettime.c
> +++ b/sysdeps/unix/sysv/linux/clock_gettime.c
> @@ -21,10 +21,6 @@
> #include <errno.h>
> #include <time.h>
> #include "kernel-posix-cpu-timers.h"
> -
> -#ifdef HAVE_CLOCK_GETTIME_VSYSCALL
> -# define HAVE_VSYSCALL
> -#endif
> #include <sysdep-vdso.h>
>
> #include <shlib-compat.h>
> @@ -33,24 +29,39 @@
> int
> __clock_gettime64 (clockid_t clock_id, struct __timespec64 *tp)
> {
> + int r = -1;
> +
> #ifdef __ASSUME_TIME64_SYSCALLS
> + /* 64 bit ABIs or Newer 32-bit ABIs that only support 64-bit time_t. */
> +# ifdef __NR_clock_gettime64
> + r = INLINE_SYSCALL_CALL (clock_gettime64, clock_id, tp);
> +# else
> +# ifdef HAVE_CLOCK_GETTIME_VSYSCALL
> + r = INLINE_VSYSCALL (clock_gettime, 2, clock_id, tp);
> +# endif
> + if (r == -1)
> + r = INLINE_SYSCALL_CALL (clock_gettime, clock_id, tp);
Why do you check __NR_clock_gettime64 first? Won't this make the vDSO
unused?
Thanks,
Florian
On 13/12/2019 08:51, Florian Weimer wrote:
> * Adhemerval Zanella:
>
>> diff --git a/sysdeps/unix/sysv/linux/aarch64/gettimeofday.c b/sysdeps/unix/sysv/linux/aarch64/gettimeofday.c
>> index 7e772e05ce..07d38466e2 100644
>> --- a/sysdeps/unix/sysv/linux/aarch64/gettimeofday.c
>> +++ b/sysdeps/unix/sysv/linux/aarch64/gettimeofday.c
>> @@ -22,10 +22,6 @@
>>
>> #include <time.h>
>> #include <sysdep.h>
>> -
>> -#ifdef HAVE_GETTIMEOFDAY_VSYSCALL
>> -# define HAVE_VSYSCALL
>> -#endif
>> #include <sysdep-vdso.h>
>>
>> /* Used as a fallback in the ifunc resolver if VDSO is not available
>> @@ -36,7 +32,9 @@ __gettimeofday_vsyscall (struct timeval *restrict tv, void *restrict tz)
>> if (__glibc_unlikely (tz != 0))
>> memset (tz, 0, sizeof *tz);
>>
>> - return INLINE_VSYSCALL (gettimeofday, 2, tv, tz);
>> + if (INLINE_VSYSCALL (gettimeofday, 2, tv, tz) == 0)
>> + return 0;
>> + return INLINE_SYSCALL_CALL (gettimeofday, tv, tz);
>> }
>
> Given that this is the fallback function why do we try INLINE_VSYSCALL
> first?
>
> (The static case would need adjusting, of course.)
Because it will be used on static build and the fallback case will be
unlikely. But I can add static only case that uses vDSO plus syscall and
change the shared fallback case that just issues the syscall.
My idea is to eventually consolidate the aarch64/powerpc64/x86_64
gettimeofday implementation, since they are essentially the same.
>
>> diff --git a/sysdeps/unix/sysv/linux/clock_gettime.c b/sysdeps/unix/sysv/linux/clock_gettime.c
>> index 875c4fe905..4ea56c9a4b 100644
>> --- a/sysdeps/unix/sysv/linux/clock_gettime.c
>> +++ b/sysdeps/unix/sysv/linux/clock_gettime.c
>> @@ -21,10 +21,6 @@
>> #include <errno.h>
>> #include <time.h>
>> #include "kernel-posix-cpu-timers.h"
>> -
>> -#ifdef HAVE_CLOCK_GETTIME_VSYSCALL
>> -# define HAVE_VSYSCALL
>> -#endif
>> #include <sysdep-vdso.h>
>>
>> #include <shlib-compat.h>
>> @@ -33,24 +29,39 @@
>> int
>> __clock_gettime64 (clockid_t clock_id, struct __timespec64 *tp)
>> {
>> + int r = -1;
>> +
>> #ifdef __ASSUME_TIME64_SYSCALLS
>> + /* 64 bit ABIs or Newer 32-bit ABIs that only support 64-bit time_t. */
>> +# ifdef __NR_clock_gettime64
>> + r = INLINE_SYSCALL_CALL (clock_gettime64, clock_id, tp);
>> +# else
>> +# ifdef HAVE_CLOCK_GETTIME_VSYSCALL
>> + r = INLINE_VSYSCALL (clock_gettime, 2, clock_id, tp);
>> +# endif
>> + if (r == -1)
>> + r = INLINE_SYSCALL_CALL (clock_gettime, clock_id, tp);
>
> Why do you check __NR_clock_gettime64 first? Won't this make the vDSO
> unused?
The vDSO support for clock_gettime64 was added later in this set. I
explicitly removed it because even if an architecture sets
HAVE_CLOCK_GETTIME64_VSYSCALL, it won't build.
* Adhemerval Zanella:
> On 13/12/2019 08:51, Florian Weimer wrote:
>> * Adhemerval Zanella:
>>
>>> diff --git a/sysdeps/unix/sysv/linux/aarch64/gettimeofday.c b/sysdeps/unix/sysv/linux/aarch64/gettimeofday.c
>>> index 7e772e05ce..07d38466e2 100644
>>> --- a/sysdeps/unix/sysv/linux/aarch64/gettimeofday.c
>>> +++ b/sysdeps/unix/sysv/linux/aarch64/gettimeofday.c
>>> @@ -22,10 +22,6 @@
>>>
>>> #include <time.h>
>>> #include <sysdep.h>
>>> -
>>> -#ifdef HAVE_GETTIMEOFDAY_VSYSCALL
>>> -# define HAVE_VSYSCALL
>>> -#endif
>>> #include <sysdep-vdso.h>
>>>
>>> /* Used as a fallback in the ifunc resolver if VDSO is not available
>>> @@ -36,7 +32,9 @@ __gettimeofday_vsyscall (struct timeval *restrict tv, void *restrict tz)
>>> if (__glibc_unlikely (tz != 0))
>>> memset (tz, 0, sizeof *tz);
>>>
>>> - return INLINE_VSYSCALL (gettimeofday, 2, tv, tz);
>>> + if (INLINE_VSYSCALL (gettimeofday, 2, tv, tz) == 0)
>>> + return 0;
>>> + return INLINE_SYSCALL_CALL (gettimeofday, tv, tz);
>>> }
>>
>> Given that this is the fallback function why do we try INLINE_VSYSCALL
>> first?
>>
>> (The static case would need adjusting, of course.)
>
> Because it will be used on static build and the fallback case will be
> unlikely. But I can add static only case that uses vDSO plus syscall and
> change the shared fallback case that just issues the syscall.
I think that would make more sense, yes.
>>> diff --git a/sysdeps/unix/sysv/linux/clock_gettime.c b/sysdeps/unix/sysv/linux/clock_gettime.c
>>> index 875c4fe905..4ea56c9a4b 100644
>>> --- a/sysdeps/unix/sysv/linux/clock_gettime.c
>>> +++ b/sysdeps/unix/sysv/linux/clock_gettime.c
>>> @@ -21,10 +21,6 @@
>>> #include <errno.h>
>>> #include <time.h>
>>> #include "kernel-posix-cpu-timers.h"
>>> -
>>> -#ifdef HAVE_CLOCK_GETTIME_VSYSCALL
>>> -# define HAVE_VSYSCALL
>>> -#endif
>>> #include <sysdep-vdso.h>
>>>
>>> #include <shlib-compat.h>
>>> @@ -33,24 +29,39 @@
>>> int
>>> __clock_gettime64 (clockid_t clock_id, struct __timespec64 *tp)
>>> {
>>> + int r = -1;
>>> +
>>> #ifdef __ASSUME_TIME64_SYSCALLS
>>> + /* 64 bit ABIs or Newer 32-bit ABIs that only support 64-bit time_t. */
>>> +# ifdef __NR_clock_gettime64
>>> + r = INLINE_SYSCALL_CALL (clock_gettime64, clock_id, tp);
>>> +# else
>>> +# ifdef HAVE_CLOCK_GETTIME_VSYSCALL
>>> + r = INLINE_VSYSCALL (clock_gettime, 2, clock_id, tp);
>>> +# endif
>>> + if (r == -1)
>>> + r = INLINE_SYSCALL_CALL (clock_gettime, clock_id, tp);
>>
>> Why do you check __NR_clock_gettime64 first? Won't this make the vDSO
>> unused?
>
> The vDSO support for clock_gettime64 was added later in this set. I
> explicit removed because even if an architecture sets
> HAVE_CLOCK_GETTIME64_VSYSCALL, it won't build.
Ah, I see it now:
/* Get current value of CLOCK and store it in TP. */
int
__clock_gettime64 (clockid_t clock_id, struct __timespec64 *tp)
{
int r = -1;
#ifdef __ASSUME_TIME64_SYSCALLS
/* 64 bit ABIs or Newer 32-bit ABIs that only support 64-bit time_t. */
# ifdef __NR_clock_gettime64
# ifdef HAVE_CLOCK_GETTIME64_VSYSCALL
r = INLINE_VSYSCALL (clock_gettime64, 2, clock_id, tp);
# endif
if (r == -1)
r = INLINE_SYSCALL_CALL (clock_gettime64, clock_id, tp);
# else
# ifdef HAVE_CLOCK_GETTIME_VSYSCALL
r = INLINE_VSYSCALL (clock_gettime, 2, clock_id, tp);
# endif
if (r == -1)
r = INLINE_SYSCALL_CALL (clock_gettime, clock_id, tp);
# endif
#else
/* Old 32-bit ABI with possible 64-bit time_t support. */
# ifdef __NR_clock_gettime64
# ifdef HAVE_CLOCK_GETTIME64_VSYSCALL
r = INLINE_VSYSCALL (clock_gettime64, 2, clock_id, tp);
# endif
if (r == -1)
r = INLINE_SYSCALL_CALL (clock_gettime64, clock_id, tp);
# endif
if (r == -1)
{
/* Fallback code that uses 32-bit support. */
struct timespec tp32;
# ifdef HAVE_CLOCK_GETTIME_VSYSCALL
r = INLINE_VSYSCALL (clock_gettime, 2, clock_id, &tp32);
# endif
if (r == -1)
r = INLINE_SYSCALL_CALL (clock_gettime, clock_id, &tp32);
if (r == 0)
*tp = valid_timespec_to_timespec64 (tp32);
}
#endif
return r;
}
This looks quite ugly to me.
If I read it correctly, it still does not cover for 32-bit the case of
an old kernel without clock_gettime64 support. Here, INLINE_VSYSCALL
for clock_gettimeofday64 will fail (without a context switch), then
INLINE_SYSCALL_CALL will fail, *with* a context switch, and only then
will the INLINE_VSYSCALL call for clock_gettimeofday
succeed.
Since this is used to implement clock_gettime:
#if __TIMESIZE != 64
int
__clock_gettime (clockid_t clock_id, struct timespec *tp)
{
int ret;
struct __timespec64 tp64;
ret = __clock_gettime64 (clock_id, &tp64);
if (ret == 0)
{
if (! in_time_t_range (tp64.tv_sec))
{
__set_errno (EOVERFLOW);
return -1;
}
*tp = valid_timespec64_to_timespec (tp64);
}
return ret;
}
#endif
it will impact quite a lot of installations. We know that
clock_gettimeofday performance is critical to many users, even on i386.
The main question is whether it is worth supporting clock_gettime64
without vDSO support. If it is not, at startup, the loader should
select a function pointer for the clock_gettime64 implementation used by
the clock_gettime64 wrapper:
(a) kernel-provided clock_gettime64 from the vDSO
(b) glibc clock_gettime64 implementation on top of clock_gettime vDSO
(c) glibc clock_gettime64 implementation on top of clock_gettime syscall
Checking the presence of vDSO symbols is reasonably efficient because
it's just a hash table lookup (especially if _dl_lookup_direct is used).
We would have two indirect calls for the legacy vDSO case, but getting
rid of that would mean to use an IFUNC for clock_gettime and
clock_gettime64, driving up complexity again.
Unfortunately, writing (b) and (c) may not be easy on architectures
where INTERNAL_VSYSCALL_CALL uses a non-standard calling convention with
inline assembly, due to lack of proper GCC support for it.
If we need to support systems without clock_gettime64 in the vDSO, we
have a problem because that requires system call probing, which is
probably not something that we want to do at startup.
Thanks,
Florian
On 13/12/2019 11:03, Florian Weimer wrote:
> * Adhemerval Zanella:
>
>> On 13/12/2019 08:51, Florian Weimer wrote:
>>> * Adhemerval Zanella:
>>>
>>>> diff --git a/sysdeps/unix/sysv/linux/aarch64/gettimeofday.c b/sysdeps/unix/sysv/linux/aarch64/gettimeofday.c
>>>> index 7e772e05ce..07d38466e2 100644
>>>> --- a/sysdeps/unix/sysv/linux/aarch64/gettimeofday.c
>>>> +++ b/sysdeps/unix/sysv/linux/aarch64/gettimeofday.c
>>>> @@ -22,10 +22,6 @@
>>>>
>>>> #include <time.h>
>>>> #include <sysdep.h>
>>>> -
>>>> -#ifdef HAVE_GETTIMEOFDAY_VSYSCALL
>>>> -# define HAVE_VSYSCALL
>>>> -#endif
>>>> #include <sysdep-vdso.h>
>>>>
>>>> /* Used as a fallback in the ifunc resolver if VDSO is not available
>>>> @@ -36,7 +32,9 @@ __gettimeofday_vsyscall (struct timeval *restrict tv, void *restrict tz)
>>>> if (__glibc_unlikely (tz != 0))
>>>> memset (tz, 0, sizeof *tz);
>>>>
>>>> - return INLINE_VSYSCALL (gettimeofday, 2, tv, tz);
>>>> + if (INLINE_VSYSCALL (gettimeofday, 2, tv, tz) == 0)
>>>> + return 0;
>>>> + return INLINE_SYSCALL_CALL (gettimeofday, tv, tz);
>>>> }
>>>
>>> Given that this is the fallback function why do we try INLINE_VSYSCALL
>>> first?
>>>
>>> (The static case would need adjusting, of course.)
>>
>> Because it will be used on static build and the fallback case will be
>> unlikely. But I can add static only case that uses vDSO plus syscall and
>> change the shared fallback case that just issues the syscall.
>
> I think that would make more sense, yes.
>
>>>> diff --git a/sysdeps/unix/sysv/linux/clock_gettime.c b/sysdeps/unix/sysv/linux/clock_gettime.c
>>>> index 875c4fe905..4ea56c9a4b 100644
>>>> --- a/sysdeps/unix/sysv/linux/clock_gettime.c
>>>> +++ b/sysdeps/unix/sysv/linux/clock_gettime.c
>>>> @@ -21,10 +21,6 @@
>>>> #include <errno.h>
>>>> #include <time.h>
>>>> #include "kernel-posix-cpu-timers.h"
>>>> -
>>>> -#ifdef HAVE_CLOCK_GETTIME_VSYSCALL
>>>> -# define HAVE_VSYSCALL
>>>> -#endif
>>>> #include <sysdep-vdso.h>
>>>>
>>>> #include <shlib-compat.h>
>>>> @@ -33,24 +29,39 @@
>>>> int
>>>> __clock_gettime64 (clockid_t clock_id, struct __timespec64 *tp)
>>>> {
>>>> + int r = -1;
>>>> +
>>>> #ifdef __ASSUME_TIME64_SYSCALLS
>>>> + /* 64 bit ABIs or Newer 32-bit ABIs that only support 64-bit time_t. */
>>>> +# ifdef __NR_clock_gettime64
>>>> + r = INLINE_SYSCALL_CALL (clock_gettime64, clock_id, tp);
>>>> +# else
>>>> +# ifdef HAVE_CLOCK_GETTIME_VSYSCALL
>>>> + r = INLINE_VSYSCALL (clock_gettime, 2, clock_id, tp);
>>>> +# endif
>>>> + if (r == -1)
>>>> + r = INLINE_SYSCALL_CALL (clock_gettime, clock_id, tp);
>>>
>>> Why do you check __NR_clock_gettime64 first? Won't this make the vDSO
>>> unused?
>>
>> The vDSO support for clock_gettime64 was added later in this set. I
>> explicit removed because even if an architecture sets
>> HAVE_CLOCK_GETTIME64_VSYSCALL, it won't build.
>
> Ah, I see it now:
>
> /* Get current value of CLOCK and store it in TP. */
> int
> __clock_gettime64 (clockid_t clock_id, struct __timespec64 *tp)
> {
> int r = -1;
>
> #ifdef __ASSUME_TIME64_SYSCALLS
> /* 64 bit ABIs or Newer 32-bit ABIs that only support 64-bit time_t. */
> # ifdef __NR_clock_gettime64
> # ifdef HAVE_CLOCK_GETTIME64_VSYSCALL
> r = INLINE_VSYSCALL (clock_gettime64, 2, clock_id, tp);
> # endif
> if (r == -1)
> r = INLINE_SYSCALL_CALL (clock_gettime64, clock_id, tp);
> # else
> # ifdef HAVE_CLOCK_GETTIME_VSYSCALL
> r = INLINE_VSYSCALL (clock_gettime, 2, clock_id, tp);
> # endif
> if (r == -1)
> r = INLINE_SYSCALL_CALL (clock_gettime, clock_id, tp);
> # endif
> #else
> /* Old 32-bit ABI with possible 64-bit time_t support. */
> # ifdef __NR_clock_gettime64
> # ifdef HAVE_CLOCK_GETTIME64_VSYSCALL
> r = INLINE_VSYSCALL (clock_gettime64, 2, clock_id, tp);
> # endif
> if (r == -1)
> r = INLINE_SYSCALL_CALL (clock_gettime64, clock_id, tp);
> # endif
> if (r == -1)
> {
> /* Fallback code that uses 32-bit support. */
> struct timespec tp32;
> # ifdef HAVE_CLOCK_GETTIME_VSYSCALL
> r = INLINE_VSYSCALL (clock_gettime, 2, clock_id, &tp32);
> # endif
> if (r == -1)
> r = INLINE_SYSCALL_CALL (clock_gettime, clock_id, &tp32);
> if (r == 0)
> *tp = valid_timespec_to_timespec64 (tp32);
> }
> #endif
>
> return r;
> }
>
> This looks quite ugly to me.
It is with current constraints. The idea is that eventually the
!__ASSUME_TIME64_SYSCALLS path will be phased out.
>
> If I read it correctly, it still does not cover for 32-bit the case of
> an old kernel without clock_gettime64 support. Here, INLINE_VSYSCALL
> for clock_gettimeofday64 will fail (without a context switch), then
> INLINE_SYSCALL_CALL will fail, *with* a context switch, and only then,
> and only then the INLINE_VSYSCALL call for clock_gettimeofday will
> suceed.
It does cover, it is just not optimal for older kernels. This patch is
following the current semantics of clock_gettime64. I see what you
are proposing as an additional optimization.
>
> Since this is used to implement clock_gettime:
>
> #if __TIMESIZE != 64
> int
> __clock_gettime (clockid_t clock_id, struct timespec *tp)
> {
> int ret;
> struct __timespec64 tp64;
>
> ret = __clock_gettime64 (clock_id, &tp64);
>
> if (ret == 0)
> {
> if (! in_time_t_range (tp64.tv_sec))
> {
> __set_errno (EOVERFLOW);
> return -1;
> }
>
> *tp = valid_timespec64_to_timespec (tp64);
> }
>
> return ret;
> }
> #endif
>
> it will impact quite a lot of installations. We know that
> clock_gettimeofday performance is critical to many users, eveon i386.
I agree and that's why I initially preferred to not implement the
old time32 on top of the time64 ones. But it was decided and this
is the most straightforward way to progressively test the new
interfaces without require doing a full switch to time64.
>
> The main question is whether it is worth supporting clock_gettime64
> without vDSO support. If it is not, at startup, the loader should
> select a function pointer for the clock_gettime64 implementation used by
> the clock_gettime64 wrapper:
>
> (a) kernel-provided clock_gettime64 from the vDSO
> (b) glibc clock_gettime64 implementation on top of clock_gettime vDSO
> (c) glibc clock_gettime64 implementation on top of clock_gettime syscall
>
> Checking the presence of vDSO symbols is reasonably efficient because
> it's just a hash table lookup (especially if _dl_lookup_direct is used).
> We would have two indirect calls for the legacy vDSO case, but getting
> rid of that would mean to use an IFUNC for clock_gettime and
> clock_gettime64, driving up complexity again.
>
> Unfortunately, writing (b) and (c) may not be easy on architectures
> where INTERNAL_VSYSCALL_CALL uses a non-standard calling convention with
> inline assembly, due to lack of proper GCC support for it.
>
> If we need to support systems without clock_gettime64 in the vDSO, we
> have a problem because that requires system call probing, which is
> probably not something that we want to do at startup.
Linux is moving to make the vDSO infrastructure more generic and easier
so the architectures will require less boilerplate to enable it. However
I do see that it might take some time to enable it on all architectures
and there might be some kernel configuration that might explicitly
disable the clock_gettime64 vDSO.
But I think essentially what you are suggesting is an optimization to a
scenario that in practice should be unusual: a glibc build with a v5.1+
kernel headers, but deployed in a older kernel without time64 support.
This scenario could be a more common possibility for static builds, so
a possible option is to use a global flag atomically set in the
case of ENOSYS (as discussed previously for time64_t). We could set
it per syscall interface or globally to assume or not kernel support
for time64_t. But again I think this should be handled as an
optimization, rather than a requisite/blocker.
* Adhemerval Zanella:
> But I think essentially what you are suggesting is an optimization to a
> scenario that in practice should be unusual: a glibc build with a v5.1+
> kernel headers, but deployed in a older kernel without time64 support.
I don't think that's quite right. It will affect any future Fedora
release that is deployed on current container environments, irrespective
of container technology. In the past, vendors were really slow to
rebase kernels.
Furthermore, I think we have tentative agreement that we want to move to
built-in system call tables to make it clearer what functionality we
support. In particular, we viewed this as a requirement for rseq
support. While it seems unlikely at this point that rseq support will
make it into the upcoming release, I still hope to contribute my syscall
tables patch next week. (The patch is done, but the auto-updating of
the tables doesn't quite work yet the way Joseph would like it.)
It's also not just an optimization because the selection logic should be
generic and could be written once because it does not depend on the
function pointer.
But unfortunately, probing the way I suggested will not work. It's
incompatible with existing seccomp filters running on newer kernels
because they will cause the syscall in the vDSO to fail with ENOSYS. So
we still need a fallback path unfortunately. If I'm right, this
invalidates a previous review comment of mine regarding the fallback
path after INLINE_VSYSCALL.
Thanks,
Florian
* Florian Weimer:
> * Adhemerval Zanella:
>
>> diff --git a/sysdeps/unix/sysv/linux/aarch64/gettimeofday.c b/sysdeps/unix/sysv/linux/aarch64/gettimeofday.c
>> index 7e772e05ce..07d38466e2 100644
>> --- a/sysdeps/unix/sysv/linux/aarch64/gettimeofday.c
>> +++ b/sysdeps/unix/sysv/linux/aarch64/gettimeofday.c
>> @@ -22,10 +22,6 @@
>>
>> #include <time.h>
>> #include <sysdep.h>
>> -
>> -#ifdef HAVE_GETTIMEOFDAY_VSYSCALL
>> -# define HAVE_VSYSCALL
>> -#endif
>> #include <sysdep-vdso.h>
>>
>> /* Used as a fallback in the ifunc resolver if VDSO is not available
>> @@ -36,7 +32,9 @@ __gettimeofday_vsyscall (struct timeval *restrict tv, void *restrict tz)
>> if (__glibc_unlikely (tz != 0))
>> memset (tz, 0, sizeof *tz);
>>
>> - return INLINE_VSYSCALL (gettimeofday, 2, tv, tz);
>> + if (INLINE_VSYSCALL (gettimeofday, 2, tv, tz) == 0)
>> + return 0;
>> + return INLINE_SYSCALL_CALL (gettimeofday, tv, tz);
>> }
>
> Given that this is the fallback function why do we try INLINE_VSYSCALL
> first?
As I mentioned in my other message, seccomp filters make INLINE_VSYSCALL
without fallback tricky because the vDSO could use a filtered system
call, while the direct syscall path would still succeed because it uses
a system call known to and approved by the seccomp filter.
Thanks,
Florian
On 13/12/2019 11:49, Florian Weimer wrote:
> * Adhemerval Zanella:
>
>> But I think essentially what you are suggesting is an optimization to a
>> scenario that in practice should be unusual: a glibc build with a v5.1+
>> kernel headers, but deployed in a older kernel without time64 support.
>
> I don't think that's quite right. It will affect any future Fedora
> release that is deployed on current container environments, irrespective
> of container technology. In the past, vendors were really slow to
> rebase kernels.
If this scenario is indeed something usual for new glibc installations,
a generic build to enable both time64 and time support will eventually
require some probing to get kernel support.
>
> Furthermore, I think we have tentative agreement that we want to move to
> built-in system call tables to make it clearer what functionality we
> support. In particular, we viewed this as a requirement for rseq
> support. While it seems unlikely at this point that rseq support will
> make it into the upcoming release, I still hope to contribute my syscall
> tables patch next week. (The patch is done, but the auto-updating of
> the tables doesn't quite work yet the way Joseph would like it.)
>
> It's also not just an optimization because the selection logic should be
> generic and could be written once because it does not depend on the
> function pointer.
Even with syscall tables being updated with the latest kernel releases,
the optimization I see that you are suggesting is to avoid probing on
every syscall.
>
> But unfortunately, probing the way I suggested will not work. It's
> incompatible with existing seccomp filters running on newer kernels
> because they will cause the syscall in the vDSO to fail with ENOSYS. So
> we still need a fallback path unfortunately. If I'm right, this
> invalidates a previous review comment of mine regarding the fallback
> path after INLINE_VSYSCALL.
The fallback after the vDSO is essentially because some vDSO
implementations do not issue the syscall themselves, but rather return
ENOSYS. If I recall correctly it was the case for mips for some 3.1x
version; I would expect that this behaviour is an outlier and the usual
vDSO support is to call the syscall.
And the fallback also works if seccomp triggers an ENOSYS as well. So
what I think might be an option for !__ASSUME_TIME64_SYSCALLS with
an optimization to avoid probing is:
int r = -1;
static int time64_support = 1;
if (atomic_load_relaxed (&time64_support) == 1)
{
#ifdef HAVE_CLOCK_GETTIME64_VSYSCALL
/* It assumes that vDSO will always fallback to syscall
for invalid timers. */
r = INLINE_VSYSCALL (clock_gettime64, 2, clock_id, tp);
#else
r = INLINE_SYSCALL_CALL (clock_gettime64, clock_id, tp);
#endif
if (r == ENOSYS)
{
atomic_store_relaxed (&time64_support, 0);
r = -1;
}
}
if (r == -1)
{
/* Fallback code that uses 32-bit support. */
struct timespec tp32;
# ifdef HAVE_CLOCK_GETTIME_VSYSCALL
/* Some vDSO implementation might not call the syscall for
invalid timers. */
r = INLINE_VSYSCALL (clock_gettime, 2, clock_id, &tp32);
# endif
if (r == -1)
r = INLINE_SYSCALL_CALL (clock_gettime, clock_id, &tp32);
if (r == 0)
*tp = valid_timespec_to_timespec64 (tp32);
}
return r;
@@ -22,10 +22,6 @@
#include <time.h>
#include <sysdep.h>
-
-#ifdef HAVE_GETTIMEOFDAY_VSYSCALL
-# define HAVE_VSYSCALL
-#endif
#include <sysdep-vdso.h>
/* Used as a fallback in the ifunc resolver if VDSO is not available
@@ -36,7 +32,9 @@ __gettimeofday_vsyscall (struct timeval *restrict tv, void *restrict tz)
if (__glibc_unlikely (tz != 0))
memset (tz, 0, sizeof *tz);
- return INLINE_VSYSCALL (gettimeofday, 2, tv, tz);
+ if (INLINE_VSYSCALL (gettimeofday, 2, tv, tz) == 0)
+ return 0;
+ return INLINE_SYSCALL_CALL (gettimeofday, tv, tz);
}
#ifdef SHARED
@@ -20,9 +20,6 @@
#include <errno.h>
#include <time.h>
-#ifdef HAVE_CLOCK_GETRES_VSYSCALL
-# define HAVE_VSYSCALL
-#endif
#include <sysdep-vdso.h>
#include <shlib-compat.h>
#include <kernel-features.h>
@@ -31,25 +28,39 @@
int
__clock_getres64 (clockid_t clock_id, struct __timespec64 *res)
{
+ int r = -1;
+
#ifdef __ASSUME_TIME64_SYSCALLS
-# ifndef __NR_clock_getres_time64
- return INLINE_VSYSCALL (clock_getres, 2, clock_id, res);
+ /* 64 bit ABIs or Newer 32-bit ABIs that only support 64-bit time_t. */
+# ifdef __NR_clock_getres_time64
+ r = INLINE_SYSCALL_CALL (clock_getres_time64, clock_id, res);
# else
- return INLINE_SYSCALL (clock_getres_time64, 2, clock_id, res);
+# ifdef HAVE_CLOCK_GETRES_VSYSCALL
+ r = INLINE_VSYSCALL (clock_getres, 2, clock_id, res);
+# endif
+ if (r == -1)
+ r = INLINE_SYSCALL_CALL (clock_getres, clock_id, res);
# endif
#else
+ /* Old 32-bit ABI with possible 64-bit time_t support. */
# ifdef __NR_clock_getres_time64
- int ret = INLINE_SYSCALL (clock_getres_time64, 2, clock_id, res);
- if (ret == 0 || errno != ENOSYS)
- return ret;
+ r = INLINE_SYSCALL_CALL (clock_getres_time64, clock_id, res);
# endif
- struct timespec ts32;
- int retval = INLINE_VSYSCALL (clock_getres, 2, clock_id, &ts32);
- if (! retval && res)
- *res = valid_timespec_to_timespec64 (ts32);
-
- return retval;
+ if (r == -1)
+ {
+ /* Fallback code that uses 32-bit support. */
+ struct timespec ts32;
+# ifdef HAVE_CLOCK_GETRES_VSYSCALL
+ r = INLINE_VSYSCALL (clock_getres, 2, clock_id, &ts32);
+# endif
+ if (r == -1)
+ r = INLINE_SYSCALL_CALL (clock_getres, clock_id, &ts32);
+ if (r == 0)
+ *res = valid_timespec_to_timespec64 (ts32);
+ }
#endif
+
+ return r;
}
#if __TIMESIZE != 64
@@ -60,7 +71,7 @@ __clock_getres (clockid_t clock_id, struct timespec *res)
int retval;
retval = __clock_getres64 (clock_id, &ts64);
- if (! retval && res)
+ if (retval == 0 && res != NULL)
*res = valid_timespec64_to_timespec (ts64);
return retval;
@@ -21,10 +21,6 @@
#include <errno.h>
#include <time.h>
#include "kernel-posix-cpu-timers.h"
-
-#ifdef HAVE_CLOCK_GETTIME_VSYSCALL
-# define HAVE_VSYSCALL
-#endif
#include <sysdep-vdso.h>
#include <shlib-compat.h>
@@ -33,24 +29,39 @@
int
__clock_gettime64 (clockid_t clock_id, struct __timespec64 *tp)
{
+ int r = -1;
+
#ifdef __ASSUME_TIME64_SYSCALLS
-# ifndef __NR_clock_gettime64
-# define __NR_clock_gettime64 __NR_clock_gettime
-# define __vdso_clock_gettime64 __vdso_clock_gettime
+ /* 64 bit ABIs or Newer 32-bit ABIs that only support 64-bit time_t. */
+# ifdef __NR_clock_gettime64
+ r = INLINE_SYSCALL_CALL (clock_gettime64, clock_id, tp);
+# else
+# ifdef HAVE_CLOCK_GETTIME_VSYSCALL
+ r = INLINE_VSYSCALL (clock_gettime, 2, clock_id, tp);
+# endif
+ if (r == -1)
+ r = INLINE_SYSCALL_CALL (clock_gettime, clock_id, tp);
# endif
- return INLINE_VSYSCALL (clock_gettime64, 2, clock_id, tp);
#else
-# if defined HAVE_CLOCK_GETTIME64_VSYSCALL
- int ret64 = INLINE_VSYSCALL (clock_gettime64, 2, clock_id, tp);
- if (ret64 == 0 || errno != ENOSYS)
- return ret64;
+ /* Old 32-bit ABI with possible 64-bit time_t support. */
+# ifdef __NR_clock_gettime64
+ r = INLINE_SYSCALL_CALL (clock_gettime64, clock_id, tp);
# endif
- struct timespec tp32;
- int ret = INLINE_VSYSCALL (clock_gettime, 2, clock_id, &tp32);
- if (ret == 0)
- *tp = valid_timespec_to_timespec64 (tp32);
- return ret;
+ if (r == -1)
+ {
+ /* Fallback code that uses 32-bit support. */
+ struct timespec tp32;
+# ifdef HAVE_CLOCK_GETTIME_VSYSCALL
+ r = INLINE_VSYSCALL (clock_gettime, 2, clock_id, &tp32);
+# endif
+ if (r == -1)
+ r = INLINE_SYSCALL_CALL (clock_gettime, clock_id, &tp32);
+ if (r == 0)
+ *tp = valid_timespec_to_timespec64 (tp32);
+ }
#endif
+
+ return r;
}
#if __TIMESIZE != 64
@@ -18,21 +18,16 @@
#include <errno.h>
#include <sched.h>
#include <sysdep.h>
-
-#ifdef HAVE_GETCPU_VSYSCALL
-# define HAVE_VSYSCALL
-#endif
#include <sysdep-vdso.h>
int
__getcpu (unsigned int *cpu, unsigned int *node)
{
-#ifdef __NR_getcpu
- return INLINE_VSYSCALL (getcpu, 3, cpu, node, NULL);
-#else
- __set_errno (ENOSYS);
- return -1;
+#ifdef HAVE_GETCPU_VSYSCALL
+ if (INLINE_VSYSCALL (getcpu, 3, cpu, node, NULL) == 0)
+ return 0;
#endif
+ return INLINE_SYSCALL_CALL (getcpu, cpu, node, NULL);
}
weak_alias (__getcpu, getcpu)
libc_hidden_def (__getcpu)
@@ -22,19 +22,3 @@
/* List of system calls which are supported as vsyscalls. */
#define HAVE_CLOCK_GETTIME_VSYSCALL "__vdso_clock_gettime"
#define HAVE_GETTIMEOFDAY_VSYSCALL "__vdso_gettimeofday"
-
-#ifndef __ASSEMBLER__
-
-/* Standard MIPS syscalls have an error flag, and return a positive errno
- when the error flag is set. Emulate this behaviour for vsyscalls so that
- the INTERNAL_SYSCALL_{ERROR_P,ERRNO} macros work correctly. */
-#define INTERNAL_VSYSCALL_CALL(funcptr, err, nr, args...) \
- ({ \
- long _ret = funcptr (args); \
- err = ((unsigned long) (_ret) >= (unsigned long) -4095L); \
- if (err) \
- _ret = -_ret; \
- _ret; \
- })
-
-#endif /* __ASSEMBLER__ */
@@ -106,7 +106,6 @@ __get_timebase_freq (void)
if (vdsop == NULL)
return get_timebase_freq_fallback ();
- INTERNAL_SYSCALL_DECL (err);
- return INTERNAL_VSYSCALL_CALL_TYPE (vdsop, err, uint64_t, 0);
+ return INTERNAL_VSYSCALL_CALL_TYPE (vdsop, uint64_t, 0);
}
weak_alias (__get_timebase_freq, __ppc_get_timebase_freq)
@@ -17,10 +17,6 @@
#include <time.h>
#include <sysdep.h>
-
-#ifdef HAVE_GETTIMEOFDAY_VSYSCALL
-# define HAVE_VSYSCALL
-#endif
#include <sysdep-vdso.h>
static int
@@ -29,7 +25,9 @@ __gettimeofday_syscall (struct timeval *restrict tv, void *restrict tz)
if (__glibc_unlikely (tz != 0))
memset (tz, 0, sizeof *tz);
- return INLINE_VSYSCALL (gettimeofday, 2, tv, tz);
+ if (INLINE_VSYSCALL (gettimeofday, 2, tv, tz) == 0)
+ return 0;
+ return INLINE_SYSCALL_CALL (gettimeofday, tv, tz);
}
#ifdef SHARED
@@ -41,7 +41,7 @@
function call, with the exception of LR (which is needed for the
"sc; bnslr+" sequence) and CR (where only CR0.SO is clobbered to signal
an error return status). */
-# define INTERNAL_VSYSCALL_CALL_TYPE(funcptr, err, type, nr, args...) \
+# define INTERNAL_VSYSCALL_CALL_TYPE(funcptr, type, nr, args...) \
({ \
register void *r0 __asm__ ("r0"); \
register long int r3 __asm__ ("r3"); \
@@ -63,13 +63,15 @@
: "+r" (r0), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), \
"+r" (r8), "+r" (r9), "+r" (r10), "+r" (r11), "+r" (r12) \
: : "cr0", "ctr", "lr", "memory"); \
- err = (long int) r0; \
+ long int err = (long int) r0; \
__asm__ __volatile__ ("" : "=r" (rval) : "r" (r3), "r" (r4)); \
+ if (INTERNAL_SYSCALL_ERROR_P (rval, err)) \
+ rval = -rval; \
rval; \
})
-#define INTERNAL_VSYSCALL_CALL(funcptr, err, nr, args...) \
- INTERNAL_VSYSCALL_CALL_TYPE(funcptr, err, long int, nr, args)
+#define INTERNAL_VSYSCALL_CALL(funcptr, nr, args...) \
+ INTERNAL_VSYSCALL_CALL_TYPE(funcptr, long int, nr, args)
# undef INLINE_SYSCALL
# define INLINE_SYSCALL(name, nr, args...) \
@@ -51,7 +51,7 @@
gave back in the non-error (CR0.SO cleared) case, otherwise (CR0.SO set)
the negation of the return value in the kernel gets reverted. */
-#define INTERNAL_VSYSCALL_CALL_TYPE(funcptr, err, type, nr, args...) \
+#define INTERNAL_VSYSCALL_CALL_TYPE(funcptr, type, nr, args...) \
({ \
register void *r0 __asm__ ("r0"); \
register long int r3 __asm__ ("r3"); \
@@ -70,13 +70,15 @@
: "+r" (r0), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), \
"+r" (r7), "+r" (r8) \
: : "r9", "r10", "r11", "r12", "cr0", "ctr", "lr", "memory"); \
- err = (long int) r0; \
+ long int err = (long int) r0; \
__asm__ __volatile__ ("" : "=r" (rval) : "r" (r3)); \
+ if (INTERNAL_SYSCALL_ERROR_P (rval, err)) \
+ rval = -rval; \
rval; \
})
-#define INTERNAL_VSYSCALL_CALL(funcptr, err, nr, args...) \
- INTERNAL_VSYSCALL_CALL_TYPE(funcptr, err, long int, nr, args)
+#define INTERNAL_VSYSCALL_CALL(funcptr, nr, args...) \
+ INTERNAL_VSYSCALL_CALL_TYPE(funcptr, long int, nr, args)
/* This version is for kernels that implement system calls that
behave like function calls as far as register saving. */
@@ -18,16 +18,15 @@
#include <time.h>
#include <sysdep.h>
-
-#ifdef HAVE_TIME_VSYSCALL
-# define HAVE_VSYSCALL
-#endif
#include <sysdep-vdso.h>
static time_t
time_vsyscall (time_t *t)
{
- return INLINE_VSYSCALL (time, 1, t);
+ time_t ret = INLINE_VSYSCALL (time, 1, t);
+ if (ret != -1)
+ return ret;
+ return INLINE_SYSCALL_CALL (time, t);
}
#ifdef SHARED
@@ -18,22 +18,17 @@
#include <errno.h>
#include <sched.h>
#include <sysdep.h>
-
-#ifdef HAVE_GETCPU_VSYSCALL
-# define HAVE_VSYSCALL
-#endif
#include <sysdep-vdso.h>
int
sched_getcpu (void)
{
-#ifdef __NR_getcpu
unsigned int cpu;
- int r = INLINE_VSYSCALL (getcpu, 3, &cpu, NULL, NULL);
-
- return r == -1 ? r : cpu;
-#else
- __set_errno (ENOSYS);
- return -1;
+ int r = -1;
+#ifdef HAVE_GETCPU_VSYSCALL
+ r = INLINE_VSYSCALL (getcpu, 3, &cpu, NULL, NULL);
#endif
+ if (r == -1)
+ r = INLINE_SYSCALL_CALL (getcpu, &cpu, NULL, NULL);
+ return r == -1 ? r : cpu;
}
@@ -34,13 +34,6 @@
#else /* __ASSEMBLER__ */
-#define INTERNAL_VSYSCALL_CALL(funcptr, err, nr, args...) \
- ({ \
- long _ret = funcptr (args); \
- err = ((unsigned long) (_ret) >= (unsigned long) -4095L); \
- _ret; \
- })
-
# define VDSO_NAME "LINUX_2.6"
# define VDSO_HASH 61765110
@@ -21,69 +21,30 @@
#include <dl-vdso.h>
+/* Return the errno value as a negative value in case of an error, or zero
+   or a positive value otherwise.  */
#ifndef INTERNAL_VSYSCALL_CALL
-# define INTERNAL_VSYSCALL_CALL(funcptr, err, nr, args...) \
+# define INTERNAL_VSYSCALL_CALL(funcptr, nr, args...) \
funcptr (args)
#endif
-#ifdef HAVE_VSYSCALL
-
-# include <libc-vdso.h>
-
-# define INLINE_VSYSCALL(name, nr, args...) \
- ({ \
- __label__ out; \
- __label__ iserr; \
- INTERNAL_SYSCALL_DECL (sc_err); \
- long int sc_ret; \
- \
- __typeof (__vdso_##name) vdsop = __vdso_##name; \
- PTR_DEMANGLE (vdsop); \
- if (vdsop != NULL) \
- { \
- sc_ret = INTERNAL_VSYSCALL_CALL (vdsop, sc_err, nr, ##args); \
- if (!INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
- goto out; \
- if (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err) != ENOSYS) \
- goto iserr; \
- } \
- \
- sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, ##args); \
- if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
- { \
- iserr: \
- __set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err)); \
- sc_ret = -1L; \
- } \
- out: \
- sc_ret; \
- })
-
-# define INTERNAL_VSYSCALL(name, err, nr, args...) \
- ({ \
- __label__ out; \
- long v_ret; \
- \
- __typeof (__vdso_##name) vdsop = __vdso_##name; \
- PTR_DEMANGLE (vdsop); \
- if (vdsop != NULL) \
- { \
- v_ret = INTERNAL_VSYSCALL_CALL (vdsop, err, nr, ##args); \
- if (!INTERNAL_SYSCALL_ERROR_P (v_ret, err) \
- || INTERNAL_SYSCALL_ERRNO (v_ret, err) != ENOSYS) \
- goto out; \
- } \
- v_ret = INTERNAL_SYSCALL (name, err, nr, ##args); \
- out: \
- v_ret; \
+#include <libc-vdso.h>
+
+#define INLINE_VSYSCALL(name, nr, args...) \
+ ({ \
+ long int sc_ret = -1; \
+ __typeof (__vdso_##name) vdsop = __vdso_##name; \
+ PTR_DEMANGLE (vdsop); \
+ if (vdsop != NULL) \
+ { \
+ sc_ret = INTERNAL_VSYSCALL_CALL (vdsop, nr, ##args); \
+ if ((unsigned long) sc_ret > -4096UL) \
+ { \
+ __set_errno (-sc_ret); \
+ sc_ret = -1L; \
+ } \
+ } \
+ sc_ret; \
})
-#else
-
-# define INLINE_VSYSCALL(name, nr, args...) \
- INLINE_SYSCALL (name, nr, ##args)
-# define INTERNAL_VSYSCALL(name, err, nr, args...) \
- INTERNAL_SYSCALL (name, err, nr, ##args)
-
-#endif /* USE_VSYSCALL && defined HAVE_VSYSCALL */
#endif /* SYSDEP_VDSO_LINUX_H */
@@ -18,10 +18,6 @@
#include <time.h>
#include <sysdep.h>
-
-#ifdef HAVE_GETTIMEOFDAY_VSYSCALL
-# define HAVE_VSYSCALL
-#endif
#include <sysdep-vdso.h>
static int
@@ -30,7 +26,9 @@ __gettimeofday_syscall (struct timeval *restrict tv, void *restrict tz)
if (__glibc_unlikely (tz != 0))
memset (tz, 0, sizeof *tz);
- return INLINE_VSYSCALL (gettimeofday, 2, tv, tz);
+ if (INLINE_VSYSCALL (gettimeofday, 2, tv, tz) == 0)
+ return 0;
+ return INLINE_SYSCALL_CALL (gettimeofday, tv, tz);
}
#ifdef SHARED
@@ -18,16 +18,17 @@
#include <time.h>
#include <sysdep.h>
-
-#ifdef HAVE_TIME_VSYSCALL
-# define HAVE_VSYSCALL
-#endif
#include <sysdep-vdso.h>
static time_t
time_vsyscall (time_t *t)
{
- return INLINE_VSYSCALL (time, 1, t);
+#ifdef HAVE_TIME_VSYSCALL
+ time_t ret = INLINE_VSYSCALL (time, 1, t);
+ if (ret != -1)
+ return ret;
+#endif
+ return INLINE_SYSCALL_CALL (time, t);
}
#ifdef SHARED