Hi Adhemerval,
this is a patch, which applies on top of your personal branch
azanella/bz12683. It implements the s390/s390x specific parts.
The kernel has a bug in the 32bit compat layer.
When sigcancel_handler is executed, SIGCANCEL is added to uc_sigmask
and SIGCANCEL is sent to the same thread again. The current kernel
misinterprets the uc_sigmask, which leads to delivering SIGCANCEL
in an endless loop.
The kernel bug is being fixed. With the fixed kernel, it works as
expected on s390, too.
Bye Stefan
On 08/31/2015 11:10 PM, Adhemerval Zanella wrote:
> Hi all,
>
> This is an updated version of my previous patchset to fix BZ#12683 [1]
>
> The patchset fixes the x86_64, i386, x32, powerpc32, powerpc64{le}, aarch64,
> and ARM port. It will require some help for alpha, hppa, ia64, m68k, microblaze,
> nios2, s390, sh, sparc, and tile. I summarized in wiki page [2] the steps
> required to adjust the remaining architectures, but based on arm/aarch64 the
> minimal adjustments required are:
>
> 1. Write a new syscall implementation at sysdeps/unix/sysv/linux/<arch>/syscall_cancel.S
> that basically do:
>
> long int __syscall_cancel_arch (volatile unsigned int *cancelhandling,
> __syscall_arg_t nr, __syscall_arg_t arg1, __syscall_arg_t arg2,
> __syscall_arg_t arg3, __syscall_arg_t arg4, __syscall_arg_t arg5,
> __syscall_arg_t arg6)
> {
> if (*cancelhandling & CANCELED_BITMASK)
> __syscall_do_cancel()
>
> return INLINE_SYSCALL (nr, 6, arg1, arg2, arg3, arg4, arg5, arg6);
> }
>
> 2. Adjust sysdeps/unix/sysv/linux/<arch>/sysdep-cancel.h to make cancellable
> syscalls to call __syscall_cancel instead of *_{enable,disable}_asynccancel.
>
> 3. Create a function to get current IP address based on ucontext_t:
>
> static inline
> long int __pthread_get_ip (const struct ucontext *uc)
> {
> // TODO
> }
>
> 4. Define both SYSCALL_CANCEL_ERROR(__val) and SYSCALL_CANCEL_ERRNO(__val)
> macros.
>
> For x86_64 and i386 implementation my approach was to just remove the
> pthread_cond_{timed}wait assembly implementation and use default C code, but
> since Torvald Riegel's new condvar implementation [3] also removed them, this
> patchset do not contain such removals. Also, this fix is easy to adjust
> to the new futex API also proposed by Torvald, and I can adjust the patch when
> the new API is pushed upstream. The bulk of the implementation just depends on a
> cancellable futex call done by new mechanism which is orthogonal of the new
> proposed futex API.
>
> The idea is try to push it for 2.23 and I have a personal branch [4] with
> a working tree for the aforementioned architectures plus some skeleton for
> s390 and s390x (not working though).
>
> [1] https://sourceware.org/ml/libc-alpha/2015-06/msg00895.html
> [2] https://sourceware.org/glibc/wiki/Release/2.21/bz12683
> [3] https://sourceware.org/ml/libc-alpha/2015-05/msg00287.htm
> [4] https://sourceware.org/git/?p=glibc.git;a=shortlog;h=refs/heads/azanella/bz12683
>
@@ -1,4 +1,4 @@
-/* Cancellable syscall wrapper - aarch64 version.
+/* Cancellable syscall wrapper - s390 version.
Copyright (C) 2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
@@ -18,7 +18,34 @@
#include <sysdep.h>
+/* long int [r2] __syscall_cancel_arch (int *cancelhandling [r2],
+ long int nr [r3],
+ long int arg1 [r4],
+ long int arg2 [r5],
+ long int arg3 [r6],
+ long int arg4 [SP+96],
+ long int arg5 [SP+100],
+ long int arg6 [SP+104]) */
+
ENTRY (__syscall_cancel_arch)
+ /* Save registers and setup stack. */
+ stm %r6,%r15,24(%r15) /* Save registers */
+ cfi_offset (%r15, -36)
+ cfi_offset (%r14, -40)
+ cfi_offset (%r13, -44)
+ cfi_offset (%r12, -48)
+ cfi_offset (%r11, -52)
+ cfi_offset (%r10, -56)
+ cfi_offset (%r9, -60)
+ cfi_offset (%r8, -64)
+ cfi_offset (%r7, -68)
+ cfi_offset (%r6, -72)
+ lr %r1,%r15
+ l %r0,4(0,%r15) /* Load eos */
+ ahi %r15,-96 /* Buy stack space */
+ cfi_adjust_cfa_offset (96)
+ st %r1,0(0,%r15) /* Store back chain */
+ st %r0,4(0,%r15) /* Store eos */
.globl __syscall_cancel_arch_start
.type __syscall_cancel_arch_start,@function
@@ -26,14 +53,33 @@ __syscall_cancel_arch_start:
/* if (*cancelhandling & CANCELED_BITMASK)
__syscall_do_cancel() */
+ tm 3(%r2),4
+ jne 1f
/* Issue a 6 argument syscall */
+ lr %r1,%r3 /* Move syscall number. */
+ lr %r2,%r4 /* First parameter. */
+ lr %r3,%r5 /* Second parameter. */
+ lr %r4,%r6 /* Third parameter. */
+ l %r5,192(%r15) /* Fourth parameter. */
+ l %r6,196(%r15) /* Fifth parameter. */
+ l %r7,200(%r15) /* Sixth parameter. */
+ svc 0 /* svc number is always in r1. */
.globl __syscall_cancel_arch_end
.type __syscall_cancel_arch_end,@function
__syscall_cancel_arch_end:
+ l %r15,0(%r15) /* Load back chain. */
+ cfi_adjust_cfa_offset (-96)
+	lm %r6,%r15,24(%r15)	/* Load registers. */
- /* Branch to __syscall_do_cancel */
+ br %r14
+ /* Branch to __syscall_do_cancel */
+1:
+ l %r15,0(%r15) /* Load back chain. */
+ cfi_adjust_cfa_offset (-96)
+ lm %r6,%r15,24(%r15) /* Load registers. */
+ jg __syscall_do_cancel
END (__syscall_cancel_arch)
libc_hidden_def (__syscall_cancel_arch)
@@ -24,6 +24,49 @@
#if IS_IN (libc) || IS_IN (libpthread) || IS_IN (librt)
+# if IS_IN (libc)
+# define PREPARE_CALL
+# define PREPARE_GOT
+# define JMP_SYSCALL_CANCEL HIDDEN_JUMPTARGET(__syscall_cancel)
+# else
+# define PREPARE_CALL l %r12,2f-0b(%r13); \
+ la %r12,0(%r12,%r13);
+# define PREPARE_GOT 2: .long _GLOBAL_OFFSET_TABLE_-0b;
+# define JMP_SYSCALL_CANCEL __syscall_cancel@plt
+# endif
+
+# define STORE_0 /* Nothing */
+# define STORE_1 /* Nothing */
+# define STORE_2 /* Nothing */
+# define STORE_3 /* Nothing */
+# define STORE_4 st %r6,24(%r15); \
+ cfi_offset (%r6,-72);
+# define STORE_5 STORE_4
+# define STORE_6 STORE_4
+
+# define LOAD_0 /* Nothing */
+# define LOAD_1 /* Nothing */
+# define LOAD_2 /* Nothing */
+# define LOAD_3 /* Nothing */
+# define LOAD_4 l %r6,24(%r15);
+# define LOAD_5 LOAD_4
+# define LOAD_6 LOAD_4
+
+# define MOVE_ARGS_0
+# define MOVE_ARGS_1 lr %r3,%r2; \
+ MOVE_ARGS_0
+# define MOVE_ARGS_2 lr %r4,%r3; \
+ MOVE_ARGS_1
+# define MOVE_ARGS_3 lr %r5,%r4; \
+ MOVE_ARGS_2
+# define MOVE_ARGS_4 lr %r6,%r5; \
+ MOVE_ARGS_3
+# define MOVE_ARGS_5 st %r6,96(%r15); \
+ MOVE_ARGS_4
+# define MOVE_ARGS_6 l %r14,96(%r14); \
+ st %r14,100(%r15); \
+ MOVE_ARGS_5
+
# undef PSEUDO
# define PSEUDO(name, syscall_name, args) \
.text; \
@@ -34,40 +77,35 @@ L(pseudo_cancel): \
cfi_offset (%r14, -40); \
cfi_offset (%r13, -44); \
cfi_offset (%r12, -48); \
+ STORE_##args \
lr %r14,%r15; \
- ahi %r15,-96; \
- cfi_adjust_cfa_offset (96); \
+ ahi %r15,-104; \
+ cfi_adjust_cfa_offset (104); \
st %r14,0(%r15); \
+ MOVE_ARGS_##args \
+ lhi %r2,SYS_ify (syscall_name); \
basr %r13,0; \
0: l %r1,1f-0b(%r13); \
+ PREPARE_CALL \
bas %r14,0(%r1,%r13); \
- lr %r0,%r2; \
- .if SYS_ify (syscall_name) < 256; \
- svc SYS_ify (syscall_name); \
- .else; \
- lhi %r1,SYS_ify (syscall_name); \
- svc 0; \
- .endif; \
- l %r1,2f-0b(%r13); \
- lr %r12,%r2; \
- lr %r2,%r0; \
- bas %r14,0(%r1,%r13); \
- lr %r2,%r12; \
- lm %r12,%r15,48+96(%r15); \
+ lm %r12,%r15,48+104(%r15); \
+ cfi_restore (%r12); \
+ cfi_restore (%r13); \
+ cfi_restore (%r14); \
+ cfi_restore (%r15); \
+ LOAD_##args \
cfi_endproc; \
j L(pseudo_check); \
+1: .long JMP_SYSCALL_CANCEL-0b; \
+ PREPARE_GOT \
ENTRY(name) \
SINGLE_THREAD_P(%r1) \
jne L(pseudo_cancel); \
-.type __##syscall_name##_nocancel,@function; \
-.globl __##syscall_name##_nocancel; \
-__##syscall_name##_nocancel: \
DO_CALL(syscall_name, args); \
L(pseudo_check): \
lhi %r4,-4095; \
clr %r2,%r4; \
jnl SYSCALL_ERROR_LABEL; \
-.size __##syscall_name##_nocancel,.-__##syscall_name##_nocancel; \
L(pseudo_end):
# ifndef __ASSEMBLER__
@@ -95,7 +133,7 @@ L(pseudo_end):
static inline
long int __pthread_get_ip (const struct ucontext *uc)
{
- /* TODO: return IP address from ucontext. */
- return 0L;
+ /* We have 31bit addresses, remove bit 0. */
+ return uc->uc_mcontext.psw.addr & 0x7FFFFFFF;
}
#endif
@@ -1,4 +1,4 @@
-/* Cancellable syscall wrapper - aarch64 version.
+/* Cancellable syscall wrapper - s390x version.
Copyright (C) 2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
@@ -18,22 +18,70 @@
#include <sysdep.h>
+/* long int [r2] __syscall_cancel_arch (int *cancelhandling [r2],
+ long int nr [r3],
+ long int arg1 [r4],
+ long int arg2 [r5],
+ long int arg3 [r6],
+ long int arg4 [SP+160],
+ long int arg5 [SP+168],
+ long int arg6 [SP+176]) */
+
ENTRY (__syscall_cancel_arch)
+ /* Save registers and setup stack. */
+ stmg %r6,%r15,48(%r15) /* Save registers. */
+ cfi_offset (%r15,-40)
+ cfi_offset (%r14,-48)
+ cfi_offset (%r13,-56)
+ cfi_offset (%r12,-64)
+ cfi_offset (%r11,-72)
+ cfi_offset (%r10,-80)
+ cfi_offset (%r9,-88)
+ cfi_offset (%r8,-96)
+ cfi_offset (%r7,-104)
+ cfi_offset (%r6,-112)
+ lgr %r1,%r15
+ lg %r0,8(%r15) /* Load eos. */
+ aghi %r15,-160 /* Buy stack space. */
+ cfi_adjust_cfa_offset (160)
+ stg %r1,0(%r15) /* Store back chain. */
+ stg %r0,8(%r15) /* Store eos. */
+
.globl __syscall_cancel_arch_start
.type __syscall_cancel_arch_start,@function
__syscall_cancel_arch_start:
/* if (*cancelhandling & CANCELED_BITMASK)
__syscall_do_cancel() */
+ tm 3(%r2),4
+ jne 1f
/* Issue a 6 argument syscall */
+ lgr %r1,%r3 /* Move syscall number. */
+ lgr %r2,%r4 /* First parameter. */
+ lgr %r3,%r5 /* Second parameter. */
+ lgr %r4,%r6 /* Third parameter. */
+ lg %r5,320(%r15) /* Fourth parameter. */
+ lg %r6,328(%r15) /* Fifth parameter. */
+ lg %r7,336(%r15) /* Sixth parameter. */
+
+ svc 0 /* svc number is always in r1. */
.globl __syscall_cancel_arch_end
.type __syscall_cancel_arch_end,@function
__syscall_cancel_arch_end:
+ lg %r15,0(%r15) /* load back chain */
+ cfi_adjust_cfa_offset (-160)
+ lmg %r6,%r15,48(%r15) /* Load registers. */
- /* Branch to __syscall_do_cancel */
+ br %r14
+ /* Branch to __syscall_do_cancel */
+1:
+ lg %r15,0(%r15) /* load back chain */
+ cfi_adjust_cfa_offset (-160)
+ lmg %r6,%r15,48(%r15) /* Load registers. */
+ jg __syscall_do_cancel
END (__syscall_cancel_arch)
libc_hidden_def (__syscall_cancel_arch)
@@ -24,44 +24,75 @@
#if IS_IN (libc) || IS_IN (libpthread) || IS_IN (librt)
+# if IS_IN (libc)
+# define JMP_SYSCALL_CANCEL HIDDEN_JUMPTARGET(__syscall_cancel)
+# else
+# define JMP_SYSCALL_CANCEL __syscall_cancel@plt
+# endif
+
+# define STORE_0 /* Nothing */
+# define STORE_1 /* Nothing */
+# define STORE_2 /* Nothing */
+# define STORE_3 /* Nothing */
+# define STORE_4 stg %r6,48(%r15); \
+ cfi_offset (%r6,-112);
+# define STORE_5 STORE_4
+# define STORE_6 STORE_4
+
+# define LOAD_0 /* Nothing */
+# define LOAD_1 /* Nothing */
+# define LOAD_2 /* Nothing */
+# define LOAD_3 /* Nothing */
+# define LOAD_4 lg %r6,48(%r15);
+# define LOAD_5 LOAD_4
+# define LOAD_6 LOAD_4
+
+# define MOVE_ARGS_0
+# define MOVE_ARGS_1 lgr %r3,%r2; \
+ MOVE_ARGS_0
+# define MOVE_ARGS_2 lgr %r4,%r3; \
+ MOVE_ARGS_1
+# define MOVE_ARGS_3 lgr %r5,%r4; \
+ MOVE_ARGS_2
+# define MOVE_ARGS_4 lgr %r6,%r5; \
+ MOVE_ARGS_3
+# define MOVE_ARGS_5 stg %r6,160(%r15); \
+ MOVE_ARGS_4
+# define MOVE_ARGS_6 lg %r14,160(%r14); \
+ stg %r14,168(%r15); \
+ MOVE_ARGS_5
+
+
# undef PSEUDO
# define PSEUDO(name, syscall_name, args) \
.text; \
L(pseudo_cancel): \
cfi_startproc; \
- stmg %r13,%r15,104(%r15); \
+ stmg %r14,%r15,112(%r15); \
cfi_offset (%r15,-40); \
cfi_offset (%r14,-48); \
- cfi_offset (%r13,-56); \
+ STORE_##args \
lgr %r14,%r15; \
- aghi %r15,-160; \
- cfi_adjust_cfa_offset (160); \
+ aghi %r15,-176; \
+ cfi_adjust_cfa_offset (176); \
stg %r14,0(%r15); \
- lgr %r0,%r2; \
- .if SYS_ify (syscall_name) < 256; \
- svc SYS_ify (syscall_name); \
- .else; \
- lghi %r1,SYS_ify (syscall_name); \
- svc 0; \
- .endif; \
- lgr %r13,%r2; \
- lgr %r2,%r0; \
- lgr %r2,%r13; \
- lmg %r13,%r15,104+160(%r15); \
+ MOVE_ARGS_##args \
+ lghi %r2,SYS_ify (syscall_name); \
+ brasl %r14,JMP_SYSCALL_CANCEL; \
+ lmg %r14,%r15,112+176(%r15); \
+ cfi_restore (%r14); \
+ cfi_restore (%r15); \
+ LOAD_##args \
cfi_endproc; \
j L(pseudo_check); \
ENTRY(name) \
SINGLE_THREAD_P \
jne L(pseudo_cancel); \
-.type __##syscall_name##_nocancel,@function; \
-.globl __##syscall_name##_nocancel; \
-__##syscall_name##_nocancel: \
DO_CALL(syscall_name, args); \
L(pseudo_check): \
lghi %r4,-4095; \
clgr %r2,%r4; \
jgnl SYSCALL_ERROR_LABEL; \
-.size __##syscall_name##_nocancel,.-__##syscall_name##_nocancel; \
L(pseudo_end):
# if IS_IN (libpthread)
@@ -114,7 +145,6 @@ extern int __local_multiple_threads attribute_hidden;
static inline
long int __pthread_get_ip (const struct ucontext *uc)
{
- /* TODO: return IP address from ucontext. */
- return 0L;
+ return uc->uc_mcontext.psw.addr;
}
#endif
@@ -250,8 +250,8 @@
#define INTERNAL_SYSCALL_ERRNO(val, err) (-(val))
#undef SYSCALL_CANCEL_ERROR
-#define SYSCALL_CANCEL_ERROR(__val) \
- ((unsigned int) (__val) >= -4095)
+#define SYSCALL_CANCEL_ERROR(__val) \
+ ((unsigned long) (__val) >= -4095UL)
#undef SYSCALL_CANCEL_ERRNO
#define SYSCALL_CANCEL_ERRNO(__val) \