[v5,10/22] riscv: Fix Race conditions in pthread cancellation [BZ#12683]
Checks
Context               | Check   | Description
dj/TryBot-apply_patch | success | Patch applied to master at the time it was sent
Commit Message
This is done by adding the required syscall_cancel.S.
Reviewed-by: Andrew Waterman <aswaterman@gmail.com>
---
.../unix/sysv/linux/riscv/syscall_cancel.S | 67 +++++++++++++++++++
1 file changed, 67 insertions(+)
create mode 100644 sysdeps/unix/sysv/linux/riscv/syscall_cancel.S
Comments
On Mon, 2023-04-10 at 17:46 -0300, Adhemerval Zanella via Libc-alpha
wrote:
> +1:
> + addi sp, sp, -16
> + cfi_def_cfa_offset (16)
> + REG_S ra, (16-SZREG)(sp)
> + cfi_offset (ra, -SZREG)
> + call __syscall_do_cancel
Similarly to LoongArch, IMO this should simply be
"tail __syscall_do_cancel".
Apparently _Noreturn is preventing GCC from generating a tail call for
RISC-V and LoongArch, so if you modeled the assembly following the GCC
output you'll get these sub-optimal things. It seems a GCC bug to me.
On Tue, 2023-04-11 at 14:35 +0800, Xi Ruoyao wrote:
> On Mon, 2023-04-10 at 17:46 -0300, Adhemerval Zanella via Libc-alpha
> wrote:
> > +1:
> > + addi sp, sp, -16
> > + cfi_def_cfa_offset (16)
> > + REG_S ra, (16-SZREG)(sp)
> > + cfi_offset (ra, -SZREG)
> > + call __syscall_do_cancel
>
> Similarly to LoongArch, IMO this should simply be
> "tail __syscall_do_cancel".
>
> Apparently _Noreturn is preventing GCC from generating a tail call for
> RISC-V and LoongArch
Actually "everywhere" (but why the AArch64 code seems optimized?)
> so if you modeled the assembly following the GCC
> output you'll get these sub-optimal things. It seems a GCC bug to me.
It's https://gcc.gnu.org/PR10837, closed as WONTFIX 20 years ago :(.
And a comment from H.J. complained about its impact on glibc.
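
For reference, the missed optimization is easy to reproduce with a small,
self-contained C test case (the function names below are invented for
illustration; only the _Noreturn attribute and the control flow mirror the
glibc pattern).  Per the discussion above, building with "gcc -O2 -S" shows
a plain "call" to the cancellation path rather than a tail call ("tail" on
RISC-V), because the callee is declared _Noreturn:

/* reproducer.c - hypothetical example for GCC PR10837.  */
extern _Noreturn void do_cancel (void);   /* stand-in for __syscall_do_cancel */

long
wrapper (int *cancelhandling)
{
  if (*cancelhandling & 1)
    /* Nothing can execute after this call, yet GCC still emits
       "call do_cancel" instead of a tail call, because the callee
       is _Noreturn (see gcc.gnu.org/PR10837, closed WONTFIX).  */
    do_cancel ();
  return 0;
}

Hand-writing the tail call in the assembly, as the review suggests, would
sidestep this compiler behavior.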
On 11/04/23 04:01, Xi Ruoyao wrote:
> On Tue, 2023-04-11 at 14:35 +0800, Xi Ruoyao wrote:
>> On Mon, 2023-04-10 at 17:46 -0300, Adhemerval Zanella via Libc-alpha
>> wrote:
>>> +1:
>>> + addi sp, sp, -16
>>> + cfi_def_cfa_offset (16)
>>> + REG_S ra, (16-SZREG)(sp)
>>> + cfi_offset (ra, -SZREG)
>>> + call __syscall_do_cancel
>>
>> Similarly to LoongArch, IMO this should simply be
>> "tail __syscall_do_cancel".
>>
>> Apparently _Noreturn is preventing GCC from generating a tail call for
>> RISC-V and LoongArch
>
> Actually "everywhere" (but why does the AArch64 code seem optimized?)
I hand-optimized it on some architectures, like x86_64 and aarch64. For most
of the other architectures I just modeled the code after the compiler-generated call.
>
>> so if you modeled the assembly following the GCC
>> output you'll get these sub-optimal things. It seems a GCC bug to me.
>
> It's https://gcc.gnu.org/PR10837, closed as WONTFIX 20 years ago :(.
> And a comment from H.J. complained about its impact on glibc.
>
>
diff --git a/sysdeps/unix/sysv/linux/riscv/syscall_cancel.S b/sysdeps/unix/sysv/linux/riscv/syscall_cancel.S
new file mode 100644
@@ -0,0 +1,67 @@
+/* Cancellable syscall wrapper. Linux/riscv version.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include <descr-const.h>
+
+/* long int __syscall_cancel_arch (int *cancelhandling,
+ __syscall_arg_t nr,
+ __syscall_arg_t arg1,
+ __syscall_arg_t arg2,
+ __syscall_arg_t arg3,
+ __syscall_arg_t arg4,
+ __syscall_arg_t arg5,
+ __syscall_arg_t arg6) */
+
+#ifdef SHARED
+ .option pic
+#else
+ .option nopic
+#endif
+
+ENTRY (__syscall_cancel_arch)
+
+ .globl __syscall_cancel_arch_start
+__syscall_cancel_arch_start:
+ lw t1, 0(a0)
+ /* if (*ch & CANCELED_BITMASK) */
+ andi t1, t1, TCB_CANCELED_BITMASK
+ bne t1, zero, 1f
+
+ mv t3, a1
+ mv a0, a2
+ mv a1, a3
+ mv a2, a4
+ mv a3, a5
+ mv a4, a6
+ mv a5, a7
+ mv a7, t3
+ scall
+
+ .globl __syscall_cancel_arch_end
+__syscall_cancel_arch_end:
+ ret
+
+1:
+ addi sp, sp, -16
+ cfi_def_cfa_offset (16)
+ REG_S ra, (16-SZREG)(sp)
+ cfi_offset (ra, -SZREG)
+ call __syscall_do_cancel
+
+END (__syscall_cancel_arch)
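
For readers following the control flow above, here is a rough,
non-authoritative C rendering of what the assembly implements.  The real
routine has to stay in assembly so that the __syscall_cancel_arch_start and
__syscall_cancel_arch_end labels bracket exactly the instructions up to and
including the syscall, which, as I understand the series, is what lets the
cancellation signal handler tell whether a signal arrived before, inside, or
after the system call.  TCB_CANCELED_BITMASK below is a placeholder value
rather than the real <descr-const.h> constant, and syscall () merely stands
in for the hand-written scall sequence:

#include <unistd.h>   /* syscall */

#define TCB_CANCELED_BITMASK 0x08              /* placeholder value */
extern _Noreturn void __syscall_do_cancel (void);

long int
syscall_cancel_arch_sketch (int *cancelhandling, long int nr,
                            long int a1, long int a2, long int a3,
                            long int a4, long int a5, long int a6)
{
  /* Cancellation already pending: never issue the syscall, go
     straight to the cancellation path (does not return).  */
  if (*cancelhandling & TCB_CANCELED_BITMASK)
    __syscall_do_cancel ();

  /* Otherwise perform the requested syscall with the arguments
     shifted into place (a7 = nr, a0..a5 = arg1..arg6 in the
     assembly above) and return the kernel's result.  */
  return syscall (nr, a1, a2, a3, a4, a5, a6);
}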