@@ -156,6 +156,15 @@ extern int __clock_getres64 (clockid_t clock_id,
libc_hidden_proto (__clock_getres64);
#endif
+#if __TIMESIZE == 64
+# define __clock_nanosleep64 __clock_nanosleep
+#else
+extern int __clock_nanosleep64 (clockid_t clock_id, int flags,
+ const struct __timespec64 *req,
+ struct __timespec64 *rem);
+libc_hidden_proto (__clock_nanosleep64);
+#endif
+
/* Compute the `struct tm' representation of T,
offset OFFSET seconds east of UTC,
and store year, yday, mon, mday, wday, hour, min, sec into *TP.
@@ -25,8 +25,8 @@
/* We can simply use the syscall. The CPU clocks are not supported
with this function. */
int
-__clock_nanosleep (clockid_t clock_id, int flags, const struct timespec *req,
- struct timespec *rem)
+__clock_nanosleep64 (clockid_t clock_id, int flags,
+ const struct __timespec64 *req, struct __timespec64 *rem)
{
if (clock_id == CLOCK_THREAD_CPUTIME_ID)
return EINVAL;
@@ -36,9 +36,72 @@ __clock_nanosleep (clockid_t clock_id, int flags, const struct timespec *req,
/* If the call is interrupted by a signal handler or encounters an error,
it returns a positive value similar to errno. */
INTERNAL_SYSCALL_DECL (err);
+#if __TIMESIZE != 64
+ int r;
+# ifdef __NR_clock_nanosleep_time64
+ /* For 32 bit systems with no Y2038 support the *req may have tv_pad
+ with some random values as *req from __clock_nanosleep is converted
+ to automatically allocated struct __timespec64 (req64).
+
+ For 32 bit systems being Y2038 safe the tv_pad may be not zero,
+ as glibc exported struct timespec has 64 bit tv_sec, 32 bit
+ tv_nsec (to be still POSIX compliant -> long tv_nsec ) and 32
+ bits of unnamed padding.
+ If user program allocates the struct timespec automatically, the
+ padding may have random value and as being directly passed to
+ *req needs to be cleared. */
+ timespec64_clear_padding (req);
+ r = INTERNAL_SYSCALL_CANCEL (clock_nanosleep_time64, err, clock_id,
+ flags, req, rem);
+ int ret = (INTERNAL_SYSCALL_ERROR_P (r, err)
+ ? INTERNAL_SYSCALL_ERRNO (r, err) : 0);
+# ifdef __ASSUME_64BIT_TIME
+ return ret;
+# else
+  if (r == 0 || ret != ENOSYS)
+ /* Preserve non-error/non-ENOSYS return values. */
+ return ret;
+# endif
+# endif
+ struct timespec req32, rem32;
+  valid_timespec64_to_timespec (req, &req32);
+ r = INTERNAL_SYSCALL_CANCEL (clock_nanosleep, err, clock_id, flags,
+ &req32, &rem32);
+
+ if (! INTERNAL_SYSCALL_ERROR_P (r, err))
+    valid_timespec_to_timespec64 (&rem32, rem);
+#else
int r = INTERNAL_SYSCALL_CANCEL (clock_nanosleep, err, clock_id, flags,
- req, rem);
+ req, rem);
+#endif
return (INTERNAL_SYSCALL_ERROR_P (r, err)
- ? INTERNAL_SYSCALL_ERRNO (r, err) : 0);
+ ? INTERNAL_SYSCALL_ERRNO (r, err) : 0);
}
weak_alias (__clock_nanosleep, clock_nanosleep)
+
+#if __TIMESIZE != 64
+/* 32-bit struct timespec wrapper around __clock_nanosleep64.  */
+int
+__clock_nanosleep (clockid_t clock_id, int flags, const struct timespec *req,
+                   struct timespec *rem)
+{
+  struct __timespec64 req64, rem64;
+  int retval;
+
+  if (! in_time_t_range (req->tv_sec))
+    /* clock_nanosleep reports errors via its return value (cf. the
+       EINVAL return above), not by setting errno and returning -1.  */
+    return EOVERFLOW;
+
+  valid_timespec_to_timespec64 (req, &req64);
+  retval = __clock_nanosleep64 (clock_id, flags, &req64, &rem64);
+
+  /* The kernel writes the remaining time only when a relative sleep
+     is interrupted by a signal (EINTR) -- not on success, where rem64
+     would be read uninitialized, and not for absolute sleeps.  */
+  if (retval == EINTR && rem != NULL && (flags & TIMER_ABSTIME) == 0
+      && ! timespec64_to_timespec (&rem64, rem))
+    return EOVERFLOW;
+
+  return retval;
+}
+#endif