From patchwork Mon Oct 27 07:59:41 2014
X-Patchwork-Submitter: Andrew Pinski
X-Patchwork-Id: 3429
From: Andrew Pinski
To: libc-alpha@sourceware.org
Cc: Andrew Pinski
Subject: [PATCH 17/29] [AARCH64] Syscalls for ILP32 are always passed via 64-bit values.
Date: Mon, 27 Oct 2014 00:59:41 -0700
Message-Id: <1414396793-9005-18-git-send-email-apinski@cavium.com>
In-Reply-To: <1414396793-9005-1-git-send-email-apinski@cavium.com>
References: <1414396793-9005-1-git-send-email-apinski@cavium.com>

This patch adds support for ILP32 syscalls, sign- and zero-extending
arguments where needed.  Unlike LP64, ILP32 pointers are 32-bit and
need to be zero extended rather than sign extended, which is what the
code would otherwise do.  We take advantage of ssize_t being long
rather than int for ILP32 to get this correct.

	* sysdeps/unix/sysv/linux/aarch64/sysdep.h (INLINE_VSYSCALL):
	Use long long instead of long.
	(INTERNAL_VSYSCALL): Likewise.
	(INLINE_SYSCALL): Likewise.
	(INTERNAL_SYSCALL_RAW): Likewise.
	(ARGIFY): New macro.
	(LOAD_ARGS_0): Use long long instead of long.
	(LOAD_ARGS_1): Use long long instead of long and use ARGIFY.
	(LOAD_ARGS_2): Likewise.
	(LOAD_ARGS_3): Likewise.
	(LOAD_ARGS_4): Likewise.
	(LOAD_ARGS_5): Likewise.
	(LOAD_ARGS_6): Likewise.
	(LOAD_ARGS_7): Likewise.
---
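Before the diff, a quick illustration of the extension rules the new
ARGIFY macro encodes.  The sketch below is mine, not part of the patch;
it assumes nothing beyond GCC or Clang (for __builtin_choose_expr and
__builtin_types_compatible_p).  Built with -mabi=ilp32 on AArch64 it
exercises the exact ABI this patch targets, and it also compiles on an
LP64 host, where the integer cases behave the same way:

#include <stdio.h>

/* Same macro as in the patch below.  */
#define ARGIFY(X) \
  ((unsigned long long) \
   __builtin_choose_expr(__builtin_types_compatible_p(__typeof__(X), __typeof__((X) - (X))), \
     (X), \
     __builtin_choose_expr(__builtin_types_compatible_p(int, __typeof__((X) - (X))), \
       (X), \
       (unsigned long)(X))))

int
main (void)
{
  int fd = -1;                    /* signed int: sign extends to all ones.  */
  unsigned int u = 0xdeadbeefu;   /* unsigned int: zero extends.  */
  short s = -2;                   /* short - short is int, so sign extends.  */
  char buf[4];
  char *p = buf;                  /* pointer: zero extends, never sign extends.  */

  printf ("int      -> %016llx\n", ARGIFY (fd));
  printf ("unsigned -> %016llx\n", ARGIFY (u));
  printf ("short    -> %016llx\n", ARGIFY (s));
  printf ("pointer  -> %016llx\n", ARGIFY (p));
  return 0;
}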
 sysdeps/unix/sysv/linux/aarch64/sysdep.h | 52 ++++++++++++++++++-----------
 1 files changed, 32 insertions(+), 20 deletions(-)

diff --git a/sysdeps/unix/sysv/linux/aarch64/sysdep.h b/sysdeps/unix/sysv/linux/aarch64/sysdep.h
index fc31661..0d9fa8a 100644
--- a/sysdeps/unix/sysv/linux/aarch64/sysdep.h
+++ b/sysdeps/unix/sysv/linux/aarch64/sysdep.h
@@ -156,7 +156,7 @@
   ({ \
     __label__ out; \
     __label__ iserr; \
-    long sc_ret; \
+    long long sc_ret; \
     INTERNAL_SYSCALL_DECL (sc_err); \
  \
     if (__vdso_##name != NULL) \
@@ -187,7 +187,7 @@
 # define INTERNAL_VSYSCALL(name, err, nr, args...) \
   ({ \
     __label__ out; \
-    long v_ret; \
+    long long v_ret; \
  \
     if (__vdso_##name != NULL) \
       { \
@@ -224,11 +224,11 @@
    call.  */
 # undef INLINE_SYSCALL
 # define INLINE_SYSCALL(name, nr, args...) \
-  ({ unsigned long _sys_result = INTERNAL_SYSCALL (name, , nr, args); \
+  ({ unsigned long long _sys_result = INTERNAL_SYSCALL (name, , nr, args); \
     if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (_sys_result, ), 0)) \
       { \
	__set_errno (INTERNAL_SYSCALL_ERRNO (_sys_result, )); \
-	_sys_result = (unsigned long) -1; \
+	_sys_result = (unsigned long long) -1; \
       } \
     (long) _sys_result; })
@@ -237,10 +237,10 @@
 # undef INTERNAL_SYSCALL_RAW
 # define INTERNAL_SYSCALL_RAW(name, err, nr, args...) \
-  ({ long _sys_result; \
+  ({ long long _sys_result; \
     { \
       LOAD_ARGS_##nr (args) \
-      register long _x8 asm ("x8") = (name); \
+      register long long _x8 asm ("x8") = (name); \
       asm volatile ("svc 0 // syscall " # name \
		     : "=r" (_x0) : "r"(_x8) ASM_ARGS_##nr : "memory"); \
       _sys_result = _x0; \
@@ -262,36 +262,48 @@
 # undef INTERNAL_SYSCALL_ERRNO
 # define INTERNAL_SYSCALL_ERRNO(val, err) (-(val))
 
+/* Convert X to a long long, without losing any bits if it is one
+   already or warning if it is a 32-bit pointer.  This zero extends
+   32-bit pointers and sign extends other signed types.  Note this only
+   works because ssize_t is long and short - short is promoted to int.  */
+#define ARGIFY(X) \
+  ((unsigned long long) \
+   __builtin_choose_expr(__builtin_types_compatible_p(__typeof__(X), __typeof__((X) - (X))), \
+     (X), \
+     __builtin_choose_expr(__builtin_types_compatible_p(int, __typeof__((X) - (X))), \
+       (X), \
+       (unsigned long)(X))))
+
 # define LOAD_ARGS_0() \
-  register long _x0 asm ("x0");
+  register long long _x0 asm ("x0");
 # define LOAD_ARGS_1(x0) \
-  long _x0tmp = (long) (x0); \
+  long long _x0tmp = ARGIFY (x0); \
   LOAD_ARGS_0 () \
   _x0 = _x0tmp;
 # define LOAD_ARGS_2(x0, x1) \
-  long _x1tmp = (long) (x1); \
+  long long _x1tmp = ARGIFY (x1); \
   LOAD_ARGS_1 (x0) \
-  register long _x1 asm ("x1") = _x1tmp;
+  register long long _x1 asm ("x1") = _x1tmp;
 # define LOAD_ARGS_3(x0, x1, x2) \
-  long _x2tmp = (long) (x2); \
+  long long _x2tmp = ARGIFY (x2); \
   LOAD_ARGS_2 (x0, x1) \
-  register long _x2 asm ("x2") = _x2tmp;
+  register long long _x2 asm ("x2") = _x2tmp;
 # define LOAD_ARGS_4(x0, x1, x2, x3) \
-  long _x3tmp = (long) (x3); \
+  long long _x3tmp = ARGIFY (x3); \
   LOAD_ARGS_3 (x0, x1, x2) \
-  register long _x3 asm ("x3") = _x3tmp;
+  register long long _x3 asm ("x3") = _x3tmp;
 # define LOAD_ARGS_5(x0, x1, x2, x3, x4) \
-  long _x4tmp = (long) (x4); \
+  long long _x4tmp = ARGIFY (x4); \
   LOAD_ARGS_4 (x0, x1, x2, x3) \
-  register long _x4 asm ("x4") = _x4tmp;
+  register long long _x4 asm ("x4") = _x4tmp;
 # define LOAD_ARGS_6(x0, x1, x2, x3, x4, x5) \
-  long _x5tmp = (long) (x5); \
+  long long _x5tmp = ARGIFY (x5); \
   LOAD_ARGS_5 (x0, x1, x2, x3, x4) \
-  register long _x5 asm ("x5") = _x5tmp;
+  register long long _x5 asm ("x5") = _x5tmp;
 # define LOAD_ARGS_7(x0, x1, x2, x3, x4, x5, x6) \
-  long _x6tmp = (long) (x6); \
+  long long _x6tmp = ARGIFY (x6); \
   LOAD_ARGS_6 (x0, x1, x2, x3, x4, x5) \
-  register long _x6 asm ("x6") = _x6tmp;
+  register long long _x6 asm ("x6") = _x6tmp;
 
 # define ASM_ARGS_0
 # define ASM_ARGS_1 , "r" (_x0)
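To make the macro plumbing concrete, here is a rough hand expansion of
INTERNAL_SYSCALL_RAW (__NR_close, , 1, fd) with this patch applied,
wrapped in a callable function so it builds standalone.  This is my
sketch, not preprocessor output; the name my_close is invented for the
example, and it is AArch64 Linux only.  The key point is that the
argument is widened to 64 bits before it is pinned to x0, and the
result travels back as long long so ILP32 cannot truncate it:

#include <asm/unistd.h>          /* __NR_close */

static long long
my_close (int fd)
{
  long long _sys_result;
  {
    /* LOAD_ARGS_1 (fd): widen via ARGIFY first, then pin to x0.
       For a plain int argument ARGIFY is just a sign extension.  */
    long long _x0tmp = (long long) fd;
    register long long _x0 asm ("x0");
    _x0 = _x0tmp;
    register long long _x8 asm ("x8") = __NR_close;
    asm volatile ("svc 0 // syscall __NR_close"
                  : "=r" (_x0) : "r" (_x8), "r" (_x0) : "memory");
    _sys_result = _x0;
  }
  /* Values in [-4095, -1] encode errno; INTERNAL_SYSCALL_ERRNO above
     recovers the positive errno with -(val).  */
  return _sys_result;
}

int
main (void)
{
  /* Closing fd -1 fails; the raw result is -EBADF, i.e. -9.  */
  return my_close (-1) == -9 ? 0 : 1;
}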