@@ -1043,7 +1043,7 @@ setxid_signal_thread (struct xid_command *cmdp, struct pthread *t)
val = INTERNAL_SYSCALL_CALL (tgkill, pid, t->tid, SIGSETXID);
/* If this failed, it must have had not started yet or else exited. */
- if (!INTERNAL_SYSCALL_ERROR_P (val))
+ if (!syscall_error (val))
{
atomic_increment (&cmdp->cntr);
return 1;
@@ -1172,7 +1172,7 @@ __nptl_setxid (struct xid_command *cmdp)
result = INTERNAL_SYSCALL_NCS (cmdp->syscall_no, 3,
cmdp->id[0], cmdp->id[1], cmdp->id[2]);
int error = 0;
- if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result)))
+ if (__glibc_unlikely (syscall_error (result)))
{
error = -result;
__set_errno (error);
@@ -191,7 +191,7 @@ sighandler_setxid (int sig, siginfo_t *si, void *ctx)
result = INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, 3, __xidcmd->id[0],
__xidcmd->id[1], __xidcmd->id[2]);
int error = 0;
- if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result)))
+ if (__glibc_unlikely (syscall_error (result)))
error = -result;
__nptl_setxid_error (__xidcmd, error);
@@ -243,7 +243,7 @@ __pthread_initialize_minimal_internal (void)
__data.__list.__next));
int res = INTERNAL_SYSCALL_CALL (set_robust_list, &pd->robust_head,
sizeof (struct robust_list_head));
- if (INTERNAL_SYSCALL_ERROR_P (res))
+ if (syscall_error (res))
set_robust_list_not_avail ();
}
@@ -69,7 +69,7 @@ __pthread_cancel (pthread_t th)
int val = INTERNAL_SYSCALL_CALL (tgkill, pid, pd->tid,
SIGCANCEL);
- if (INTERNAL_SYSCALL_ERROR_P (val))
+ if (syscall_error (val))
result = -val;
break;
@@ -33,7 +33,7 @@ __pthread_getaffinity_np (pthread_t th, size_t cpusetsize, cpu_set_t *cpuset)
int res = INTERNAL_SYSCALL_CALL (sched_getaffinity, pd->tid,
MIN (INT_MAX, cpusetsize), cpuset);
- if (INTERNAL_SYSCALL_ERROR_P (res))
+ if (syscall_error (res))
return -res;
/* Clean the rest of the memory the kernel didn't do. */
@@ -301,7 +301,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
__lll_private_flag (FUTEX_TRYLOCK_PI,
private), 0, 0);
- if (INTERNAL_SYSCALL_ERROR_P (e) && e == -EWOULDBLOCK)
+ if (e == -EWOULDBLOCK)
{
/* The kernel has not yet finished the mutex owner death.
We do not need to ensure ordering wrt another memory
@@ -28,14 +28,9 @@ __pthread_setaffinity_new (pthread_t th, size_t cpusetsize,
const cpu_set_t *cpuset)
{
const struct pthread *pd = (const struct pthread *) th;
- int res;
- res = INTERNAL_SYSCALL_CALL (sched_setaffinity, pd->tid, cpusetsize,
- cpuset);
-
- return (INTERNAL_SYSCALL_ERROR_P (res)
- ? -res
- : 0);
+ return -INTERNAL_SYSCALL_CALL (sched_setaffinity, pd->tid, cpusetsize,
+ cpuset);
}
versioned_symbol (libpthread, __pthread_setaffinity_new,
pthread_setaffinity_np, GLIBC_2_3_4);
@@ -38,12 +38,8 @@ __pthread_sigmask (int how, const sigset_t *newmask, sigset_t *oldmask)
}
/* We know that realtime signals are available if NPTL is used. */
- int result = INTERNAL_SYSCALL_CALL (rt_sigprocmask, how, newmask,
- oldmask, __NSIG_BYTES);
-
- return (INTERNAL_SYSCALL_ERROR_P (result)
- ? -result
- : 0);
+ return -INTERNAL_SYSCALL_CALL (rt_sigprocmask, how, newmask, oldmask,
+ __NSIG_BYTES);
}
libc_hidden_def (__pthread_sigmask)
@@ -81,7 +81,7 @@ typedef struct
long result_var; \
__builtin_set_thread_pointer (tcbp); \
result_var = INTERNAL_SYSCALL_CALL (arc_settls, (tcbp));\
- INTERNAL_SYSCALL_ERROR_P (result_var) \
+ syscall_error (result_var) \
? "settls syscall error" : NULL; \
})
@@ -99,7 +99,7 @@ typedef struct
({ long int result_var; \
result_var = INTERNAL_SYSCALL_CALL (set_thread_area, \
(char *) (tcbp) + TLS_TCB_OFFSET); \
- INTERNAL_SYSCALL_ERROR_P (result_var) \
+ syscall_error (result_var) \
? "unknown error" : NULL; })
/* Return the address of the dtv for the current thread. */
@@ -99,7 +99,7 @@ typedef struct
\
_sys_result = INTERNAL_SYSCALL_CALL (set_thread_area, \
((void *) (tcbp)) + TLS_TCB_OFFSET); \
- INTERNAL_SYSCALL_ERROR_P (_sys_result) ? "unknown error" : NULL; })
+ syscall_error (_sys_result) ? "unknown error" : NULL; })
# define TLS_DEFINE_INIT_TP(tp, pd) \
void *tp = (void *) (pd) + TLS_TCB_OFFSET + TLS_PRE_TCB_SIZE
@@ -123,7 +123,7 @@ typedef struct
({ long int result_var; \
result_var = INTERNAL_SYSCALL_CALL (set_thread_area, \
(char *) (tcbp) + TLS_TCB_OFFSET); \
- INTERNAL_SYSCALL_ERROR_P (result_var) \
+ syscall_error (result_var) \
? "unknown error" : NULL; })
/* Value passed to 'clone' for initialization of the thread register. */
@@ -66,12 +66,7 @@
# endif
# define lll_futex_syscall(nargs, futexp, op, ...) \
- ({ \
- long int __ret = INTERNAL_SYSCALL (futex, nargs, futexp, op, \
- __VA_ARGS__); \
- (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (__ret)) \
- ? __ret : 0); \
- })
+ INTERNAL_SYSCALL (futex, nargs, futexp, op, __VA_ARGS__)
/* For most of these macros, the return value is never really used.
Nevertheless, the protocol is that each one returns a negated errno
@@ -70,7 +70,7 @@ libc_hidden_proto (__feraiseexcept_soft)
\
_spefscr = fegetenv_register (); \
_r = INTERNAL_SYSCALL_CALL (prctl, PR_GET_FPEXC, &_ftrapex); \
- if (INTERNAL_SYSCALL_ERROR_P (_r)) \
+ if (syscall_error (_r)) \
_ftrapex = 0; \
} \
while (0)
@@ -33,7 +33,7 @@
# define TLS_INIT_TP(tcbp) \
({ long int result_var; \
result_var = INTERNAL_SYSCALL_CALL (set_tls, (tcbp)); \
- INTERNAL_SYSCALL_ERROR_P (result_var) \
+ syscall_error (result_var) \
? "unknown error" : NULL; })
#endif /* __ASSEMBLER__ */
@@ -57,7 +57,7 @@ __clock_nanosleep_time64 (clockid_t clock_id, int flags, const struct __timespec
struct timespec ts32 = valid_timespec64_to_timespec (*req);
r = INTERNAL_SYSCALL_CANCEL (clock_nanosleep, clock_id, flags,
&ts32, &tr32);
- if (INTERNAL_SYSCALL_ERROR_P (r))
+ if (syscall_error (r))
{
if (r == -EINTR && rem != NULL && (flags & TIMER_ABSTIME) == 0)
*rem = valid_timespec_to_timespec64 (tr32);
@@ -123,7 +123,7 @@ create_thread (struct pthread *pd, const struct pthread_attr *attr,
attr->extension->cpusetsize,
attr->extension->cpuset);
- if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res)))
+ if (__glibc_unlikely (syscall_error (res)))
err_out:
{
/* The operation failed. We have to kill the thread.
@@ -144,7 +144,7 @@ create_thread (struct pthread *pd, const struct pthread_attr *attr,
res = INTERNAL_SYSCALL_CALL (sched_setscheduler, pd->tid,
pd->schedpolicy, &pd->schedparam);
- if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res)))
+ if (__glibc_unlikely (syscall_error (res)))
goto err_out;
}
}
@@ -40,7 +40,7 @@ _dl_get_origin (void)
len = INTERNAL_SYSCALL_CALL (readlink, "/proc/self/exe", linkval,
sizeof (linkval));
- if (! INTERNAL_SYSCALL_ERROR_P (len) && len > 0 && linkval[0] != '[')
+ if (! syscall_error (len) && len > 0 && linkval[0] != '[')
{
/* We can use this value. */
assert (linkval[0] == '/');
@@ -53,7 +53,7 @@ __fcntl64_nocancel_adjusted (int fd, int cmd, void *arg)
{
struct f_owner_ex fex;
int res = INTERNAL_SYSCALL_CALL (fcntl64, fd, F_GETOWN_EX, &fex);
- if (!INTERNAL_SYSCALL_ERROR_P (res))
+ if (!syscall_error (res))
return fex.type == F_OWNER_GID ? -fex.pid : fex.pid;
return INLINE_SYSCALL_ERROR_RETURN_VALUE (-res);
@@ -80,7 +80,7 @@ __fstatat (int fd, const char *file, struct stat *buf, int flag)
# endif /* __nr_fstatat64 */
# endif /* STAT_IS_KERNEL_STAT */
- return INTERNAL_SYSCALL_ERROR_P (r)
+ return syscall_error (r)
? INLINE_SYSCALL_ERROR_RETURN_VALUE (-r)
: 0;
}
@@ -102,7 +102,7 @@ __fstatat64_time64 (int fd, const char *file, struct __stat64_t64 *buf,
# endif
#endif
- return INTERNAL_SYSCALL_ERROR_P (r)
+ return syscall_error (r)
? INLINE_SYSCALL_ERROR_RETURN_VALUE (-r)
: 0;
}
@@ -41,7 +41,7 @@ _dl_get_origin (void)
len = INTERNAL_SYSCALL_CALL (readlinkat, AT_FDCWD, "/proc/self/exe",
linkval, sizeof (linkval));
- if (! INTERNAL_SYSCALL_ERROR_P (len) && len > 0 && linkval[0] != '[')
+ if (! syscall_error (len) && len > 0 && linkval[0] != '[')
{
/* We can use this value. */
assert (linkval[0] == '/');
@@ -35,7 +35,7 @@
that we are jumping *out* of the alternate stack. Note that \
the check for that is the same as that for _STACK_GROWS_UP \
as for _STACK_GROWS_DOWN. */ \
- if (!INTERNAL_SYSCALL_ERROR_P (result) \
+ if (!syscall_error (result) \
&& ((oss.ss_flags & SS_ONSTACK) == 0 \
|| ((unsigned long) oss.ss_sp + oss.ss_size \
- (unsigned long) (sp)) < oss.ss_size)) \
@@ -25,7 +25,7 @@ writev_for_fatal (int fd, const struct iovec *iov, size_t niov, size_t total)
ssize_t cnt;
do
cnt = INTERNAL_SYSCALL_CALL (writev, fd, iov, niov);
- while (INTERNAL_SYSCALL_ERROR_P (cnt) && cnt == -EINTR);
+ while (syscall_error (cnt) && cnt == -EINTR);
return cnt == total;
}
#define WRITEV_FOR_FATAL writev_for_fatal
@@ -26,7 +26,7 @@
{ \
stack_t oss; \
int result = INTERNAL_SYSCALL_CALL (sigaltstack, NULL, &oss); \
- if (!INTERNAL_SYSCALL_ERROR_P (result) \
+ if (!syscall_error (result) \
&& ((oss.ss_flags & SS_ONSTACK) == 0 \
|| ((unsigned long) oss.ss_sp + oss.ss_size \
- (unsigned long) (sp)) < oss.ss_size)) \
@@ -35,7 +35,7 @@ __getpagesize (void)
result = INTERNAL_SYSCALL_CALL (getpagesize);
/* The only possible error is ENOSYS. */
- if (!INTERNAL_SYSCALL_ERROR_P (result))
+ if (!syscall_error (result))
return result;
return 4096;
@@ -30,7 +30,7 @@ mq_unlink (const char *name)
/* While unlink can return either EPERM or EACCES, mq_unlink should
return just EACCES. */
- if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (ret)))
+ if (__glibc_unlikely (syscall_error (ret)))
{
if (ret == -EPERM)
ret = -EACCES;
@@ -25,23 +25,15 @@
static inline int
__access_noerrno (const char *pathname, int mode)
{
- int res;
#ifdef __NR_access
- res = INTERNAL_SYSCALL_CALL (access, pathname, mode);
+ return -INTERNAL_SYSCALL_CALL (access, pathname, mode);
#else
- res = INTERNAL_SYSCALL_CALL (faccessat, AT_FDCWD, pathname, mode);
+ return -INTERNAL_SYSCALL_CALL (faccessat, AT_FDCWD, pathname, mode);
#endif
- if (INTERNAL_SYSCALL_ERROR_P (res))
- return -res;
- return 0;
}
static inline int
__kill_noerrno (pid_t pid, int sig)
{
- int res;
- res = INTERNAL_SYSCALL_CALL (kill, pid, sig);
- if (INTERNAL_SYSCALL_ERROR_P (res))
- return -res;
- return 0;
+ return -INTERNAL_SYSCALL_CALL (kill, pid, sig);
}
@@ -36,7 +36,7 @@ setup_thread (struct database_dyn *db)
since none of the threads ever terminates. */
int r = INTERNAL_SYSCALL_CALL (set_tid_address,
&db->head->nscd_certainly_running);
- if (!INTERNAL_SYSCALL_ERROR_P (r))
+ if (!syscall_error (r))
/* We know the kernel can reset this field when nscd terminates.
So, set the field to a nonzero value which indicates that nscd
is certainly running and clients can skip the test. */
@@ -42,24 +42,23 @@ int
posix_fadvise (int fd, off_t offset, off_t len, int advise)
{
# if defined (__NR_fadvise64) && !defined (__ASSUME_FADVISE64_AS_64_64)
- int ret = INTERNAL_SYSCALL_CALL (fadvise64, fd,
- __ALIGNMENT_ARG SYSCALL_LL (offset),
- len, advise);
+ return -INTERNAL_SYSCALL_CALL (fadvise64, fd,
+ __ALIGNMENT_ARG SYSCALL_LL (offset),
+ len, advise);
# else
# ifdef __ASSUME_FADVISE64_64_6ARG
- int ret = INTERNAL_SYSCALL_CALL (fadvise64_64, fd, advise,
- SYSCALL_LL (offset), SYSCALL_LL (len));
+ return -INTERNAL_SYSCALL_CALL (fadvise64_64, fd, advise,
+ SYSCALL_LL (offset), SYSCALL_LL (len));
# else
# ifndef __NR_fadvise64_64
# define __NR_fadvise64_64 __NR_fadvise64
# endif
- int ret = INTERNAL_SYSCALL_CALL (fadvise64_64, fd,
- __ALIGNMENT_ARG SYSCALL_LL (offset),
- SYSCALL_LL (len), advise);
+ return -INTERNAL_SYSCALL_CALL (fadvise64_64, fd,
+ __ALIGNMENT_ARG SYSCALL_LL (offset),
+ SYSCALL_LL (len), advise);
# endif
# endif
- return INTERNAL_SYSCALL_ERROR_P (ret) ? -ret : 0;
}
#endif /* __OFF_T_MATCHES_OFF64_T */
@@ -41,14 +41,13 @@ int
__posix_fadvise64_l64 (int fd, off64_t offset, off64_t len, int advise)
{
#ifdef __ASSUME_FADVISE64_64_6ARG
- int ret = INTERNAL_SYSCALL_CALL (fadvise64_64, fd, advise,
- SYSCALL_LL64 (offset), SYSCALL_LL64 (len));
+ return -INTERNAL_SYSCALL_CALL (fadvise64_64, fd, advise,
+ SYSCALL_LL64 (offset), SYSCALL_LL64 (len));
#else
- int ret = INTERNAL_SYSCALL_CALL (fadvise64_64, fd,
- __ALIGNMENT_ARG SYSCALL_LL64 (offset),
- SYSCALL_LL64 (len), advise);
+ return -INTERNAL_SYSCALL_CALL (fadvise64_64, fd,
+ __ALIGNMENT_ARG SYSCALL_LL64 (offset),
+ SYSCALL_LL64 (len), advise);
#endif
- return INTERNAL_SYSCALL_ERROR_P (ret) ? -ret : 0;
}
/* The type of the len argument was changed from size_t to off_t in
@@ -28,9 +28,7 @@ posix_fallocate (int fd, __off_t offset, __off_t len)
{
int res = INTERNAL_SYSCALL_CALL (fallocate, fd, 0,
SYSCALL_LL (offset), SYSCALL_LL (len));
- if (! INTERNAL_SYSCALL_ERROR_P (res))
- return 0;
- if (res != -EOPNOTSUPP)
+ if (! syscall_error (res) || -res != EOPNOTSUPP)
return -res;
return internal_fallocate (fd, offset, len);
}
@@ -30,9 +30,7 @@ __posix_fallocate64_l64 (int fd, __off64_t offset, __off64_t len)
{
int res = INTERNAL_SYSCALL_CALL (fallocate, fd, 0,
SYSCALL_LL64 (offset), SYSCALL_LL64 (len));
- if (! INTERNAL_SYSCALL_ERROR_P (res))
- return 0;
- if (-res != EOPNOTSUPP)
+ if (! syscall_error (res) || -res != EOPNOTSUPP)
return -res;
return internal_fallocate64 (fd, offset, len);
}
@@ -50,7 +50,6 @@ __pthread_kill (pthread_t threadid, int signo)
/* We have a special syscall to do the work. */
pid_t pid = __getpid ();
- int val = INTERNAL_SYSCALL_CALL (tgkill, pid, tid, signo);
- return INTERNAL_SYSCALL_ERROR_P (val) ? -val : 0;
+ return -INTERNAL_SYSCALL_CALL (tgkill, pid, tid, signo);
}
strong_alias (__pthread_kill, pthread_kill)
@@ -61,9 +61,7 @@ pthread_sigqueue (pthread_t threadid, int signo, const union sigval value)
info.si_value = value;
/* We have a special syscall to do the work. */
- int val = INTERNAL_SYSCALL_CALL (rt_tgsigqueueinfo, pid, tid, signo,
- &info);
- return INTERNAL_SYSCALL_ERROR_P (val) ? -val : 0;
+ return -INTERNAL_SYSCALL_CALL (rt_tgsigqueueinfo, pid, tid, signo, &info);
#else
return ENOSYS;
#endif
@@ -27,7 +27,7 @@ syscall (long int syscall_number, long int arg1, long int arg2, long int arg3,
ret = INTERNAL_SYSCALL_NCS (syscall_number, 7, arg1, arg2, arg3, arg4,
arg5, arg6, arg7);
- if (INTERNAL_SYSCALL_ERROR_P (ret))
+ if (syscall_error (ret))
return __syscall_error (ret);
return ret;
@@ -40,7 +40,7 @@
{ \
stack_t oss; \
int res = INTERNAL_SYSCALL_CALL (sigaltstack, NULL, &oss); \
- if (!INTERNAL_SYSCALL_ERROR_P (res)) \
+ if (!syscall_error (res)) \
{ \
if ((oss.ss_flags & SS_ONSTACK) == 0 \
|| ((uintptr_t) (oss.ss_sp + oss.ss_size) - new_sp \
@@ -42,8 +42,7 @@ __posix_fadvise64_l64 (int fd, off64_t offset, off64_t len, int advise)
parameters.offset = offset;
parameters.len = len;
parameters.advise = advise;
- int ret = INTERNAL_SYSCALL_CALL (fadvise64_64, &parameters);
- return INTERNAL_SYSCALL_ERROR_P (ret) ? -ret : 0;
+ return -INTERNAL_SYSCALL_CALL (fadvise64_64, &parameters);
}
#include <shlib-compat.h>
@@ -40,7 +40,7 @@
{ \
stack_t oss; \
int res = INTERNAL_SYSCALL_CALL (sigaltstack, NULL, &oss); \
- if (!INTERNAL_SYSCALL_ERROR_P (res)) \
+ if (!syscall_error (res)) \
{ \
if ((oss.ss_flags & SS_ONSTACK) == 0 \
|| ((uintptr_t) (oss.ss_sp + oss.ss_size) - new_sp \
@@ -35,7 +35,7 @@ shmat (int shmid, const void *shmaddr, int shmflg)
resultvar = INTERNAL_SYSCALL_CALL (ipc, IPCOP_shmat, shmid, shmflg,
&raddr, shmaddr);
- if (INTERNAL_SYSCALL_ERROR_P (resultvar))
+ if (syscall_error (resultvar))
return (void *) INLINE_SYSCALL_ERROR_RETURN_VALUE (-resultvar);
return raddr;
@@ -36,14 +36,14 @@
if (vdsop != NULL) \
{ \
sc_ret = INTERNAL_VSYSCALL_CALL (vdsop, nr, ##args); \
- if (!INTERNAL_SYSCALL_ERROR_P (sc_ret)) \
+ if (!syscall_error (sc_ret)) \
goto out; \
if (sc_ret != -ENOSYS) \
goto iserr; \
} \
\
sc_ret = INTERNAL_SYSCALL_CALL (name, ##args); \
- if (INTERNAL_SYSCALL_ERROR_P (sc_ret)) \
+ if (syscall_error (sc_ret)) \
{ \
iserr: \
__set_errno (-sc_ret); \
@@ -22,9 +22,13 @@
#include <kernel-features.h>
#include <errno.h>
-#undef INTERNAL_SYSCALL_ERROR_P
-#define INTERNAL_SYSCALL_ERROR_P(val) \
- ((unsigned long int) (val) > -4096UL)
+#ifndef __ASSEMBLER__
+static inline _Bool
+syscall_error (unsigned long int val)
+{
+ return val > -4096UL;
+}
+#endif
#ifndef SYSCALL_ERROR_LABEL
# define SYSCALL_ERROR_LABEL(sc_err) \
@@ -34,6 +38,7 @@
})
#endif
+#ifndef __ASSEMBLER__
/* Define a macro which expands into the inline wrapper code for a system
call. It sets the errno and returns -1 on a failure, or the syscall
return value otherwise. */
@@ -41,10 +46,11 @@
#define INLINE_SYSCALL(name, nr, args...) \
({ \
long int sc_ret = INTERNAL_SYSCALL (name, nr, args); \
- __glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (sc_ret)) \
+ __glibc_unlikely (syscall_error (sc_ret)) \
? SYSCALL_ERROR_LABEL (-sc_ret) \
: sc_ret; \
})
+#endif
/* Set error number and return -1. A target may choose to return the
internal function, __syscall_error, which sets errno and returns -1.
@@ -124,7 +124,7 @@ timer_create (clockid_t clock_id, struct sigevent *evp, timer_t *timerid)
int res;
res = INTERNAL_SYSCALL_CALL (timer_create, syscall_clockid, &sev,
&newp->ktimerid);
- if (INTERNAL_SYSCALL_ERROR_P (res))
+ if (syscall_error (res))
{
free (newp);
__set_errno (-res);
@@ -19,12 +19,15 @@
#include <sys/times.h>
#include <sysdep.h>
+#ifndef SYSCALL_ERROR
+# define SYSCALL_ERROR(__val) syscall_error (__val)
+#endif
clock_t
__times (struct tms *buf)
{
clock_t ret = INTERNAL_SYSCALL_CALL (times, buf);
- if (INTERNAL_SYSCALL_ERROR_P (ret)
+ if (SYSCALL_ERROR (ret)
&& __glibc_unlikely (ret == -EFAULT)
&& buf)
{
@@ -33,8 +33,11 @@
(long long int) resultvar; \
})
-#undef INTERNAL_SYSCALL_ERROR_P
-#define INTERNAL_SYSCALL_ERROR_P(val) \
- ((unsigned long long int) (val) >= -4095LL)
+static inline _Bool
+syscall_error_u64 (unsigned long long int val)
+{
+ return val > -4096ULL;
+}
+#define SYSCALL_ERROR(__val) syscall_error_u64 (__val)
#include <sysdeps/unix/sysv/linux/times.c>