[RFC,v3,04/23] sysdeps/clock_gettime: Use clock_gettime64 if available

Message ID 1f589e5a3fcaa4c103bc83169fffcdea9e1a6b2d.1563321715.git.alistair.francis@wdc.com
State New, archived

Commit Message

Alistair Francis July 17, 2019, 12:08 a.m. UTC
  Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 nptl/pthread_mutex_timedlock.c          |  7 ++++++
 sysdeps/unix/sysv/linux/clock_gettime.c | 30 +++++++++++++++++++++++++
 2 files changed, 37 insertions(+)
  

Comments

Florian Weimer July 17, 2019, 5:38 a.m. UTC | #1
* Alistair Francis:

> diff --git a/nptl/pthread_mutex_timedlock.c b/nptl/pthread_mutex_timedlock.c
> index 52c258e33d..8d9cae7d87 100644
> --- a/nptl/pthread_mutex_timedlock.c
> +++ b/nptl/pthread_mutex_timedlock.c
> @@ -402,10 +402,17 @@ __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
>  		    /* Delay the thread until the timeout is reached.
>  		       Then return ETIMEDOUT.  */
>  		    struct timespec reltime;
> +#ifdef __NR_clock_gettime64
> +		    struct __timespec64 now;
> +
> +		    INTERNAL_SYSCALL (clock_gettime64, __err, 2, CLOCK_REALTIME,
> +				      &now);
> +#else
>  		    struct timespec now;
>  
>  		    INTERNAL_SYSCALL (clock_gettime, __err, 2, clockid,
>  				      &now);
> +#endif
>  		    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
>  		    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
>  		    if (reltime.tv_nsec < 0)

I believe this needs to be updated for correctness (truncation of
tv_sec) if ever ported to architectures where __nanosleep_nocancel takes
a 32-bit time_t argument.  I don't know what our plans are regarding
that.

If you had

#define __NR_clock_gettime64  __NR_clock_gettime

in <sysdep.h>, you wouldn't need this change.
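
A minimal sketch of that (assuming a 64-bit port, where the native
clock_gettime syscall already takes a 64-bit time_t):

#ifndef __NR_clock_gettime64
# define __NR_clock_gettime64 __NR_clock_gettime
#endif

With that in place the generic code can use __NR_clock_gettime64
unconditionally and the #ifdef/#else above collapses into a single
path.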

> diff --git a/sysdeps/unix/sysv/linux/clock_gettime.c b/sysdeps/unix/sysv/linux/clock_gettime.c
> index 5fc47fb7dc..4832069c34 100644
> --- a/sysdeps/unix/sysv/linux/clock_gettime.c
> +++ b/sysdeps/unix/sysv/linux/clock_gettime.c
> @@ -27,10 +27,40 @@
>  #include <sysdep-vdso.h>
>  
>  /* Get current value of CLOCK and store it in TP.  */
> +
> +#if __WORDSIZE == 32
> +int
> +__clock_gettime (clockid_t clock_id, struct timespec *tp)
> +{
> +   int ret;
> +
> +#ifdef __NR_clock_gettime64
> +  struct __timespec64 tp64;
> +  ret = INLINE_VSYSCALL (clock_gettime64, 2, clock_id, &tp64);
> +
> +  tp->tv_sec = tp64.tv_sec;
> +  tp->tv_nsec = tp64.tv_nsec;
> +
> +  if (! in_time_t_range (tp->tv_sec))
> +    {
> +      __set_errno (EOVERFLOW);
> +      return -1;
> +    }
> +#endif
> +
> +#ifndef __ASSUME_TIME64_SYSCALLS
> +  ret = INLINE_VSYSCALL (clock_gettime, 2, clock_id, tp);
> +#endif
> +
> +  return ret;
> +}

I think this has the same problems as the timespec_get patch.

Thanks,
Florian
  
Andreas Schwab July 17, 2019, 7:03 a.m. UTC | #2
On Jul 16 2019, Alistair Francis <alistair.francis@wdc.com> wrote:

> diff --git a/sysdeps/unix/sysv/linux/clock_gettime.c b/sysdeps/unix/sysv/linux/clock_gettime.c
> index 5fc47fb7dc..4832069c34 100644
> --- a/sysdeps/unix/sysv/linux/clock_gettime.c
> +++ b/sysdeps/unix/sysv/linux/clock_gettime.c
> @@ -27,10 +27,40 @@
>  #include <sysdep-vdso.h>
>  
>  /* Get current value of CLOCK and store it in TP.  */
> +
> +#if __WORDSIZE == 32
> +int
> +__clock_gettime (clockid_t clock_id, struct timespec *tp)
> +{
> +   int ret;
> +
> +#ifdef __NR_clock_gettime64
> +  struct __timespec64 tp64;
> +  ret = INLINE_VSYSCALL (clock_gettime64, 2, clock_id, &tp64);
> +
> +  tp->tv_sec = tp64.tv_sec;
> +  tp->tv_nsec = tp64.tv_nsec;

This needs to check if clock_gettime64 was successful.
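
Something minimal along these lines (illustrative only, reusing the
variable names from the patch) would do:

  ret = INLINE_VSYSCALL (clock_gettime64, 2, clock_id, &tp64);
  if (ret == 0)
    {
      if (! in_time_t_range (tp64.tv_sec))
        {
          __set_errno (EOVERFLOW);
          return -1;
        }
      tp->tv_sec = tp64.tv_sec;
      tp->tv_nsec = tp64.tv_nsec;
    }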

Andreas.
  
Arnd Bergmann July 17, 2019, 8:04 a.m. UTC | #3
On Wed, Jul 17, 2019 at 7:39 AM Florian Weimer <fweimer@redhat.com> wrote:
> > diff --git a/nptl/pthread_mutex_timedlock.c b/nptl/pthread_mutex_timedlock.c
> > index 52c258e33d..8d9cae7d87 100644
> > --- a/nptl/pthread_mutex_timedlock.c
> > +++ b/nptl/pthread_mutex_timedlock.c
> > @@ -402,10 +402,17 @@ __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
> >                   /* Delay the thread until the timeout is reached.
> >                      Then return ETIMEDOUT.  */
> >                   struct timespec reltime;
> > +#ifdef __NR_clock_gettime64
> > +                 struct __timespec64 now;
> > +
> > +                 INTERNAL_SYSCALL (clock_gettime64, __err, 2, CLOCK_REALTIME,
> > +                                   &now);
> > +#else
> >                   struct timespec now;
> >
> >                   INTERNAL_SYSCALL (clock_gettime, __err, 2, clockid,
> >                                     &now);
> > +#endif
> >                   reltime.tv_sec = abstime->tv_sec - now.tv_sec;
> >                   reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
> >                   if (reltime.tv_nsec < 0)
>
> I believe this needs to be updated for correctness (truncation of
> tv_sec) if ever ported to architectures where __nanosleep_nocancel takes
> a 32-bit time_t argument.  I don't know what our plans are regarding to
> that.
>
> If you had
>
> #define __NR_clock_gettime64  __NR_clock_gettime
>
> in <sysdep.h>, you wouldn't need this change.

Could it be changed to just call internal __clock_gettime64() and
__nanosleep_nocancel_time64() functions that will then take care
of the architecture specifics like syscall number, vdso and
truncation?
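
For illustration (such internal functions do not exist yet, so the
names here are assumptions), the hunk above would then shrink to
roughly:

    struct __timespec64 now;

    __clock_gettime64 (clockid, &now);
    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;

with no #ifdef left at the call site.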

        Arnd
  
Florian Weimer July 17, 2019, 8:44 a.m. UTC | #4
* Arnd Bergmann:

>> I believe this needs to be updated for correctness (truncation of
>> tv_sec) if ever ported to architectures where __nanosleep_nocancel takes
>> a 32-bit time_t argument.  I don't know what our plans are regarding to
>> that.
>>
>> If you had
>>
>> #define __NR_clock_gettime64  __NR_clock_gettime
>>
>> in <sysdep.h>, you wouldn't need this change.
>
> Could it be changed to just call internal __clock_gettime64() and
> __nanosleep_nocancel_time64() functions that will then take care
> of the architecture specifics like syscall number, vdso and
> truncation?

It depends on how these functions behave.  I think we need to figure out
if/how we maintain vDSO acceleration for clock_gettime and
clock_gettime64 first.

Thanks,
Florian
  
Arnd Bergmann July 17, 2019, 9:10 a.m. UTC | #5
On Wed, Jul 17, 2019 at 10:44 AM Florian Weimer <fweimer@redhat.com> wrote:
>
> * Arnd Bergmann:
>
> >> I believe this needs to be updated for correctness (truncation of
> >> tv_sec) if ever ported to architectures where __nanosleep_nocancel takes
> >> a 32-bit time_t argument.  I don't know what our plans are regarding to
> >> that.
> >>
> >> If you had
> >>
> >> #define __NR_clock_gettime64  __NR_clock_gettime
> >>
> >> in <sysdep.h>, you wouldn't need this change.
> >
> > Could it be changed to just call internal __clock_gettime64() and
> > __nanosleep_nocancel_time64() functions that will then take care
> > of the architecture specifics like syscall number, vdso and
> > truncation?
>
> It depends on how these functions behave.  I think we need to figure out
> if/how we maintain vDSO acceleration for clock_gettime and
> clock_gettime64 first.

Agreed. In the kernel, the generic clock_gettime64() vdso support only
just landed for 5.3, and so far only arm32 and x86-32 have it, while at
least mips32, nds32, ppc32, s390 and sparc32 seem to have the old
clock_gettime() and gettimeofday() vdso support.

For the internal __clock_gettime64() implementation, we clearly
want to call __vdso_clock_gettime64() whenever that is available,
but there does not seem to be a good fallback path from there
if it is not:

- if we try syscall(__NR_clock_gettime64) next, that is a performance
  regression in systems that don't care about 64-bit time_t
- if we try __vdso_clock_gettime() before __NR_clock_gettime64,
  that can result in incorrect behavior if user space actually relies
  on being able to work past 2038.

      Arnd
  
Lukasz Majewski July 17, 2019, 12:37 p.m. UTC | #6
Hi Alistair,

> Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
> ---
>  nptl/pthread_mutex_timedlock.c          |  7 ++++++
>  sysdeps/unix/sysv/linux/clock_gettime.c | 30
> +++++++++++++++++++++++++ 2 files changed, 37 insertions(+)
> 
> diff --git a/nptl/pthread_mutex_timedlock.c
> b/nptl/pthread_mutex_timedlock.c index 52c258e33d..8d9cae7d87 100644
> --- a/nptl/pthread_mutex_timedlock.c
> +++ b/nptl/pthread_mutex_timedlock.c
> @@ -402,10 +402,17 @@ __pthread_mutex_clocklock_common
> (pthread_mutex_t *mutex, /* Delay the thread until the timeout is
> reached. Then return ETIMEDOUT.  */
>  		    struct timespec reltime;
> +#ifdef __NR_clock_gettime64
> +		    struct __timespec64 now;
> +
> +		    INTERNAL_SYSCALL (clock_gettime64, __err, 2,
> CLOCK_REALTIME,
> +				      &now);
> +#else
>  		    struct timespec now;
>  
>  		    INTERNAL_SYSCALL (clock_gettime, __err, 2,
> clockid, &now);
> +#endif
>  		    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
>  		    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
>  		    if (reltime.tv_nsec < 0)
> diff --git a/sysdeps/unix/sysv/linux/clock_gettime.c
> b/sysdeps/unix/sysv/linux/clock_gettime.c index
> 5fc47fb7dc..4832069c34 100644 ---
> a/sysdeps/unix/sysv/linux/clock_gettime.c +++
> b/sysdeps/unix/sysv/linux/clock_gettime.c @@ -27,10 +27,40 @@
>  #include <sysdep-vdso.h>
>  
>  /* Get current value of CLOCK and store it in TP.  */
> +
> +#if __WORDSIZE == 32
> +int
> +__clock_gettime (clockid_t clock_id, struct timespec *tp)
> +{
> +   int ret;
> +
> +#ifdef __NR_clock_gettime64
> +  struct __timespec64 tp64;
> +  ret = INLINE_VSYSCALL (clock_gettime64, 2, clock_id, &tp64);
> +
> +  tp->tv_sec = tp64.tv_sec;
> +  tp->tv_nsec = tp64.tv_nsec;
> +
> +  if (! in_time_t_range (tp->tv_sec))
> +    {
> +      __set_errno (EOVERFLOW);
> +      return -1;
> +    }
> +#endif
> +
> +#ifndef __ASSUME_TIME64_SYSCALLS

I think that this use of __ASSUME_TIME64_SYSCALLS doesn't match the
semantics proposed in its introduction patch [1].

In short:

This define means that the system supports 64-bit time, not that it
has (and can use) the 64-bit time syscalls introduced in Linux 5.1
(i.e. clock_gettime64, clock_settime64, clock_nanosleep64).

Maybe you could consider using the "conversion" approach proposed for
the __clock_settime64 conversion [2]?

The version in [2] is more "developer/validator" friendly (as suggested
by Arnd), as it uses the 32-bit clock_settime for __TIMESIZE != 64
archs.

The version from [3] is the one recommended by Joseph (it does the
32-to-64-bit conversion and calls clock_settime64).

For reference, the full code with the clock_settime conversion and
Y2038 support is at [4].
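
As a rough sketch only (patterned after the style in [2]/[3], not
copied from them), the 32-bit entry point would then just convert and
range-check, with all syscall/vDSO selection done inside
__clock_gettime64:

int
__clock_gettime (clockid_t clock_id, struct timespec *tp)
{
  struct __timespec64 tp64;
  int ret = __clock_gettime64 (clock_id, &tp64);

  if (ret == 0)
    {
      if (! in_time_t_range (tp64.tv_sec))
        {
          __set_errno (EOVERFLOW);
          return -1;
        }
      tp->tv_sec = tp64.tv_sec;
      tp->tv_nsec = tp64.tv_nsec;
    }

  return ret;
}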

> +  ret = INLINE_VSYSCALL (clock_gettime, 2, clock_id, tp);
> +#endif
> +
> +  return ret;
> +}
> +#else
>  int
>  __clock_gettime (clockid_t clock_id, struct timespec *tp)
>  {
>    return INLINE_VSYSCALL (clock_gettime, 2, clock_id, tp);
>  }
> +#endif
> +
>  weak_alias (__clock_gettime, clock_gettime)
>  libc_hidden_def (__clock_gettime)

Note:

[1] - https://github.com/lmajewski/y2038_glibc/commit/1fdbc6002101a78a8a6a076bbb642b3082c2225d

[2] - https://github.com/lmajewski/y2038_glibc/commit/69f842a8519ca13ed11fab0ff1bcc6fa1a524192

[3] - https://github.com/lmajewski/y2038_glibc/commit/fa0f5ff6c942beca383daeff3d48829829ace5b1

[4] - https://github.com/lmajewski/y2038_glibc/commits/Y2038-2.29-glibc-__clock-internal-struct-timespec-v6

Best regards,

Lukasz Majewski

--

DENX Software Engineering GmbH,      Managing Director: Wolfgang Denk
HRB 165235 Munich, Office: Kirchenstr.5, D-82194 Groebenzell, Germany
Phone: (+49)-8142-66989-59 Fax: (+49)-8142-66989-80 Email: lukma@denx.de
  
Florian Weimer July 17, 2019, 3:16 p.m. UTC | #7
* Arnd Bergmann:

> On Wed, Jul 17, 2019 at 10:44 AM Florian Weimer <fweimer@redhat.com> wrote:
>>
>> * Arnd Bergmann:
>>
>> >> I believe this needs to be updated for correctness (truncation of
>> >> tv_sec) if ever ported to architectures where __nanosleep_nocancel takes
>> >> a 32-bit time_t argument.  I don't know what our plans are regarding to
>> >> that.
>> >>
>> >> If you had
>> >>
>> >> #define __NR_clock_gettime64  __NR_clock_gettime
>> >>
>> >> in <sysdep.h>, you wouldn't need this change.
>> >
>> > Could it be changed to just call internal __clock_gettime64() and
>> > __nanosleep_nocancel_time64() functions that will then take care
>> > of the architecture specifics like syscall number, vdso and
>> > truncation?
>>
>> It depends on how these functions behave.  I think we need to figure out
>> if/how we maintain vDSO acceleration for clock_gettime and
>> clock_gettime64 first.
>
> Agreed. In the kernel, the generic clock_gettime64() support only just
> landed for 5.3, and so far only arm32 and x86-32 have it, while at
> least mips32, nds32, ppc32, s390 and sparc32 seem to have a the
> old clock_gettime() and gettimeofday() vdso support.
>
> For the internal __clock_gettime64() implementation, we clearly
> want to call __vdso_clock_gettime64() whenever that is available,
> but there does not seem to be a good fallback path from there
> if it is not:
>
> - if we try syscall(__NR_clock_gettime64) next, that is a performance
>   regression in systems that don't care about 64-bit time_t
> - if we try __vdso_clock_gettime() before __NR_clock_gettime64,
>   that can result in incorrect behavior if user space actually relies
>   on being able to work past 2038.

vDSO parsing happens ahead of time, during initialization, so the rules
are slightly different than with numbered syscall fallback.  System call
wrappers with both INLINE_VSYSCALL and fallback are really suspicious
and probably not a good idea in general.

The real question is whether vDSO and its constituent functions are
optional from a userspace ABI perspective.  If they are mandatory, we
can go straight to the 32-bit vDSO, and, possibly after that, the 32-bit
system call.  Instead of the vDSO implementation, we would install our
own emulation in the function pointer.  If the 64-bit vDSO is optional,
we would likely have to probe the 64-bit system call once, to keep
things sane and simple, and switch between the system call wrapper and
the fallback implementation (same as for the mandatory vDSO case).
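
Purely as an illustration of the probing idea (none of these helper
names exist in glibc; __timespec64 and INLINE_SYSCALL_CALL are the
internal spellings assumed here):

static int
clock_gettime64_direct (clockid_t clock_id, struct __timespec64 *tp)
{
  return INLINE_SYSCALL_CALL (clock_gettime64, clock_id, tp);
}

static int
clock_gettime64_emulated (clockid_t clock_id, struct __timespec64 *tp)
{
  /* Widen the 32-bit result; only used on kernels without the 64-bit
     syscall.  */
  struct timespec ts32;
  int ret = INLINE_SYSCALL_CALL (clock_gettime, clock_id, &ts32);
  if (ret == 0)
    {
      tp->tv_sec = ts32.tv_sec;
      tp->tv_nsec = ts32.tv_nsec;
    }
  return ret;
}

static int (*clock_gettime64_impl) (clockid_t, struct __timespec64 *);

/* Probe once during startup; afterwards every call is a plain
   indirect call, with no per-call fallback logic.  */
static void
clock_gettime64_init (void)
{
  struct __timespec64 ts;
  clock_gettime64_impl
    = (INLINE_SYSCALL_CALL (clock_gettime64, CLOCK_REALTIME, &ts) == 0
       ? clock_gettime64_direct : clock_gettime64_emulated);
}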

Thanks,
Florian
  
Arnd Bergmann July 18, 2019, 7:38 a.m. UTC | #8
On Wed, Jul 17, 2019 at 5:16 PM Florian Weimer <fweimer@redhat.com> wrote:
>
> The real question is whether vDSO and its constituent functions are
> optional from a userspace ABI perspective.  If they are mandatory, we
> can go straight to the 32-bit vDSO, and, possibly after that, the 32-bit
> system call.  Instead of the vDSO implementation, we would install our
> own emulation in the function pointer.  If the 64-bit vDSO is optional,
> we would likely have to probe the 64-bit system call once, to keep
> things sane and simple, and switch between the system call wrapper and
> the fallback implementation (same as for the mandatory vDSO case).

Good question. I assumed that they would be optional because:

- Half the Linux architectures don't have any vdso support at all
- arm64 traditionally has no support for 32-bit vdso (added in
  linux-5.3), and still only contains it if an arm32 toolchain is available
  at kernel build time and we use gcc rather than clang (to be fixed
  in the future).
- I assumed that all libc implementations already need a fallback
  to plain syscalls to run on older kernels even for architectures that
  have it today.
- The recent rewrite of the vdso implementation in the kernel was not
  ready for 5.1, so no architecture has clock_gettime64() in the vdso
  so far, but a lot of them have clock_gettime().

I agree that assuming a vdso clock_gettime64() to be present on
all architectures would make this much easier for you, but I'd
still hope we can find a way to avoid truncating the times if you
do that.

I have two ideas for how that could be done:

- When building for a minimum kernel version of 5.1, don't
  fall back to __vdso_clock_gettime() or syscall(__NR_clock_gettime)
  but use the slow path for __clock_gettime64() if the vdso doesn't
  work.
- if __vdso_clock_gettime64() is unavailable and __vdso_clock_gettime()
  returns negative seconds, fall back to syscall(__NR_clock_gettime).

Would either of those meet your requirements?

       Arnd
  
Florian Weimer July 18, 2019, 8:18 a.m. UTC | #9
* Arnd Bergmann:

> I have two ideas for how that could be done:
>
> - When building for a minimum kernel version of 5.1, don't
>   fall back to __vdso_clock_gettime() or syscall(__NR_clock_gettime)
>   but use the slow path for __clock_gettime64() if the vdso doesn't
>   work.

Assuming that clock_gettime64 support is available, yes.

> - if __vdso_clock_gettime64() is unavailable and __vdso_clock_gettime()
>   returns negative seconds, fall back to syscall(__NR_clock_gettime).

I don't want to do anything like this.  I expect that some of us will
eventually use time namespaces to keep programs running (with incorrect
time).  If we make glibc itself time-sensitive, then things will get
horribly complex.

> Would either of those meet your requirements?

I don't have requirements.  I just want something that has limited
impact on 64-bit architectures.  I don't think probing at startup is too
bad, actually.

Thanks,
Florian
  
Arnd Bergmann July 18, 2019, 9:14 a.m. UTC | #10
On Thu, Jul 18, 2019 at 10:19 AM Florian Weimer <fweimer@redhat.com> wrote:
>
> * Arnd Bergmann:
>
> > I have two ideas for how that could be done:
> >
> > - When building for a minimum kernel version of 5.1, don't
> >   fall back to __vdso_clock_gettime() or syscall(__NR_clock_gettime)
> >   but use the slow path for __clock_gettime64() if the vdso doesn't
> >   work.
>
> Assuming that clock_gettime64 support is available, yes.

All 32-bit architectures have the clock_gettime64() syscall in 5.1.

> > Would either of those meet your requirements?
>
> I don't have requirements.  I just want something that has limited
> impact on 64-bit architectures.  I don't think probing at startup is too
> bad, actually.

Ok, fair enough.

      Arnd
  
Adhemerval Zanella Netto July 18, 2019, 6:10 p.m. UTC | #11
On 18/07/2019 05:18, Florian Weimer wrote:
> * Arnd Bergmann:
> 
>> I have two ideas for how that could be done:
>>
>> - When building for a minimum kernel version of 5.1, don't
>>   fall back to __vdso_clock_gettime() or syscall(__NR_clock_gettime)
>>   but use the slow path for __clock_gettime64() if the vdso doesn't
>>   work.
> 
> Assuming that clock_gettime64 support is available, yes.
> 
>> - if __vdso_clock_gettime64() is unavailable and __vdso_clock_gettime()
>>   returns negative seconds, fall back to syscall(__NR_clock_gettime).
> 
> I don't want to do anything like this.  I expect that some of us will
> eventually use time namespaces to keep programs running (with incorrect
> time).  If we make glibc itself time-sensitive, then things will get
> horribly complex.
> 
>> Would either of those meet your requirements?
> 
> I don't have requirements.  I just want something that has limited
> impact on 64-bit architectures.  I don't think probing at startup is too
> bad, actually.

I hope that my vDSO refactor patchset [1] may simplify the support.
For the patchset it is just a matter of defining the expected vDSO
symbol name (HAVE_CLOCK_GETTIME_VSYSCALL for time32), and then
{INLINE,INTERNAL}_VSYSCALL will check, demangle, call, and fall back
to the syscall.

I would expect that for time64_t it would just be a matter of adding
a new define, HAVE_CLOCK_GETTIME64_VSYSCALL, which each architecture
would define where that is the case.

[1] https://sourceware.org/ml/libc-alpha/2019-06/msg00344.html
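
As an example of the intent (only a sketch; the exact spelling in the
patchset may differ), an architecture's sysdep.h would carry something
like

  #define HAVE_CLOCK_GETTIME_VSYSCALL   "__vdso_clock_gettime"
  #define HAVE_CLOCK_GETTIME64_VSYSCALL "__vdso_clock_gettime64"

and the generic code would stay a plain
INLINE_VSYSCALL (clock_gettime64, 2, clock_id, &tp64) call.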
  
Alistair Francis July 19, 2019, 9:03 p.m. UTC | #12
On Tue, Jul 16, 2019 at 10:39 PM Florian Weimer <fweimer@redhat.com> wrote:
>
> * Alistair Francis:
>
> > diff --git a/nptl/pthread_mutex_timedlock.c b/nptl/pthread_mutex_timedlock.c
> > index 52c258e33d..8d9cae7d87 100644
> > --- a/nptl/pthread_mutex_timedlock.c
> > +++ b/nptl/pthread_mutex_timedlock.c
> > @@ -402,10 +402,17 @@ __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
> >                   /* Delay the thread until the timeout is reached.
> >                      Then return ETIMEDOUT.  */
> >                   struct timespec reltime;
> > +#ifdef __NR_clock_gettime64
> > +                 struct __timespec64 now;
> > +
> > +                 INTERNAL_SYSCALL (clock_gettime64, __err, 2, CLOCK_REALTIME,
> > +                                   &now);
> > +#else
> >                   struct timespec now;
> >
> >                   INTERNAL_SYSCALL (clock_gettime, __err, 2, clockid,
> >                                     &now);
> > +#endif
> >                   reltime.tv_sec = abstime->tv_sec - now.tv_sec;
> >                   reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
> >                   if (reltime.tv_nsec < 0)
>
> I believe this needs to be updated for correctness (truncation of
> tv_sec) if ever ported to architectures where __nanosleep_nocancel takes
> a 32-bit time_t argument.  I don't know what our plans are regarding to
> that.
>
> If you had
>
> #define __NR_clock_gettime64  __NR_clock_gettime
>
> in <sysdep.h>, you wouldn't need this change.

Yep, you are right. I have used the #define and removed this change.

>
> > diff --git a/sysdeps/unix/sysv/linux/clock_gettime.c b/sysdeps/unix/sysv/linux/clock_gettime.c
> > index 5fc47fb7dc..4832069c34 100644
> > --- a/sysdeps/unix/sysv/linux/clock_gettime.c
> > +++ b/sysdeps/unix/sysv/linux/clock_gettime.c
> > @@ -27,10 +27,40 @@
> >  #include <sysdep-vdso.h>
> >
> >  /* Get current value of CLOCK and store it in TP.  */
> > +
> > +#if __WORDSIZE == 32
> > +int
> > +__clock_gettime (clockid_t clock_id, struct timespec *tp)
> > +{
> > +   int ret;
> > +
> > +#ifdef __NR_clock_gettime64
> > +  struct __timespec64 tp64;
> > +  ret = INLINE_VSYSCALL (clock_gettime64, 2, clock_id, &tp64);
> > +
> > +  tp->tv_sec = tp64.tv_sec;
> > +  tp->tv_nsec = tp64.tv_nsec;
> > +
> > +  if (! in_time_t_range (tp->tv_sec))
> > +    {
> > +      __set_errno (EOVERFLOW);
> > +      return -1;
> > +    }
> > +#endif
> > +
> > +#ifndef __ASSUME_TIME64_SYSCALLS
> > +  ret = INLINE_VSYSCALL (clock_gettime, 2, clock_id, tp);
> > +#endif
> > +
> > +  return ret;
> > +}
>
> I think this has the same problems as the timespec_get patch.

I'm still unclear what that problem is. Lukasz pointed out that these
don't match the other time64_t conversions, so I'll update it to use
that style anyway.

Alistair

>
> Thanks,
> Florian
  

Patch

diff --git a/nptl/pthread_mutex_timedlock.c b/nptl/pthread_mutex_timedlock.c
index 52c258e33d..8d9cae7d87 100644
--- a/nptl/pthread_mutex_timedlock.c
+++ b/nptl/pthread_mutex_timedlock.c
@@ -402,10 +402,17 @@  __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
 		    /* Delay the thread until the timeout is reached.
 		       Then return ETIMEDOUT.  */
 		    struct timespec reltime;
+#ifdef __NR_clock_gettime64
+		    struct __timespec64 now;
+
+		    INTERNAL_SYSCALL (clock_gettime64, __err, 2, CLOCK_REALTIME,
+				      &now);
+#else
 		    struct timespec now;
 
 		    INTERNAL_SYSCALL (clock_gettime, __err, 2, clockid,
 				      &now);
+#endif
 		    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
 		    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
 		    if (reltime.tv_nsec < 0)
diff --git a/sysdeps/unix/sysv/linux/clock_gettime.c b/sysdeps/unix/sysv/linux/clock_gettime.c
index 5fc47fb7dc..4832069c34 100644
--- a/sysdeps/unix/sysv/linux/clock_gettime.c
+++ b/sysdeps/unix/sysv/linux/clock_gettime.c
@@ -27,10 +27,40 @@ 
 #include <sysdep-vdso.h>
 
 /* Get current value of CLOCK and store it in TP.  */
+
+#if __WORDSIZE == 32
+int
+__clock_gettime (clockid_t clock_id, struct timespec *tp)
+{
+   int ret;
+
+#ifdef __NR_clock_gettime64
+  struct __timespec64 tp64;
+  ret = INLINE_VSYSCALL (clock_gettime64, 2, clock_id, &tp64);
+
+  tp->tv_sec = tp64.tv_sec;
+  tp->tv_nsec = tp64.tv_nsec;
+
+  if (! in_time_t_range (tp->tv_sec))
+    {
+      __set_errno (EOVERFLOW);
+      return -1;
+    }
+#endif
+
+#ifndef __ASSUME_TIME64_SYSCALLS
+  ret = INLINE_VSYSCALL (clock_gettime, 2, clock_id, tp);
+#endif
+
+  return ret;
+}
+#else
 int
 __clock_gettime (clockid_t clock_id, struct timespec *tp)
 {
   return INLINE_VSYSCALL (clock_gettime, 2, clock_id, tp);
 }
+#endif
+
 weak_alias (__clock_gettime, clock_gettime)
 libc_hidden_def (__clock_gettime)