[BZ #19490] Add unwind descriptors to pthread_spin_init, etc. on i386
Commit Message
Greetings,
This is similar to
https://www.sourceware.org/ml/libc-alpha/2016-01/msg00605.html
https://www.sourceware.org/ml/libc-alpha/2016-01/msg00456.html
but for i386.
Tested on Linux/i686.
Assuming this is OK, should I wait for lifting of the freeze?
2016-01-23 Paul Pluzhnikov <ppluzhnikov@google.com>
[BZ #19490]
* sysdeps/i386/i386-mcount.S (_mcount): Add unwind descriptor.
(__fentry__): Likewise.
* sysdeps/unix/sysv/linux/i386/_exit.S (_exit): Use ENTRY/END.
* sysdeps/i386/nptl/pthread_spin_lock.S (pthread_spin_lock):
Likewise.
* sysdeps/i386/pthread_spin_trylock.S (pthread_spin_trylock):
Likewise.
* sysdeps/i386/nptl/pthread_spin_unlock.S (pthread_spin_unlock):
Likewise.
Comments
On Sat, 2016-01-23 at 16:18 -0800, Paul Pluzhnikov wrote:
> 2016-01-23 Paul Pluzhnikov <ppluzhnikov@google.com>
>
> [BZ #19490]
> * sysdeps/i386/i386-mcount.S (_mcount): Add unwind descriptor
> (__fentry__): Likewise
> * sysdeps/unix/sysv/linux/i386/_exit.S (_exit): Use ENTRY/END
> * sysdeps/i386/nptl/pthread_spin_lock.S (pthread_spin_lock):
> Likewise
> * sysdeps/i386/pthread_spin_trylock.S (pthread_spin_trylock):
> Likewise
> * sysdeps/i386/nptl/pthread_spin_unlock.S (pthread_spin_unlock):
> Likewise
For the spinlocks, I'd really prefer if we could just remove the asm
files. The generic implementation should basically produce the same
code; if not, we should try to fix that instead of keeping the asm
files.
On Mon, Jan 25, 2016 at 5:06 AM, Torvald Riegel <triegel@redhat.com> wrote:
> For the spinlocks, I'd really prefer if we could just remove the asm
> files. The generic implementation should basically produce the same
> code; if not, we should try to fix that instead of keeping the asm
> files.
Using gcc-4.8.4 (4.8.4-2ubuntu1~14.04):
$ objdump -d nptl/pthread_spin_unlock.o
nptl/pthread_spin_unlock.o: file format elf32-i386
Disassembly of section .text:
00000000 <pthread_spin_unlock>:
0: f0 83 0c 24 00 lock orl $0x0,(%esp)
5: 8b 44 24 04 mov 0x4(%esp),%eax
9: c7 00 00 00 00 00 movl $0x0,(%eax)
f: 31 c0 xor %eax,%eax
11: c3 ret
This isn't quite the same as sysdeps/i386/nptl/pthread_spin_unlock.S
For pthread_spin_lock it's much worse:
$ objdump -d nptl/pthread_spin_lock.o
nptl/pthread_spin_lock.o: file format elf32-i386
Disassembly of section .text:
00000000 <pthread_spin_lock>:
0: 57 push %edi
1: b8 01 00 00 00 mov $0x1,%eax
6: 56 push %esi
7: 53 push %ebx
8: 83 ec 10 sub $0x10,%esp
b: 8b 5c 24 20 mov 0x20(%esp),%ebx
f: 87 03 xchg %eax,(%ebx)
11: 89 44 24 0c mov %eax,0xc(%esp)
15: 8b 44 24 0c mov 0xc(%esp),%eax
19: 31 ff xor %edi,%edi
1b: be 01 00 00 00 mov $0x1,%esi
20: 85 c0 test %eax,%eax
22: 74 29 je 4d <pthread_spin_lock+0x4d>
24: 8d 74 26 00 lea 0x0(%esi,%eiz,1),%esi
28: 8b 03 mov (%ebx),%eax
2a: 85 c0 test %eax,%eax
2c: 74 15 je 43 <pthread_spin_lock+0x43>
2e: ba e8 03 00 00 mov $0x3e8,%edx
33: eb 08 jmp 3d <pthread_spin_lock+0x3d>
35: 8d 76 00 lea 0x0(%esi),%esi
38: 83 ea 01 sub $0x1,%edx
3b: 74 06 je 43 <pthread_spin_lock+0x43>
3d: 8b 0b mov (%ebx),%ecx
3f: 85 c9 test %ecx,%ecx
41: 75 f5 jne 38 <pthread_spin_lock+0x38>
43: 89 f8 mov %edi,%eax
45: f0 0f b1 33 lock cmpxchg %esi,(%ebx)
49: 85 c0 test %eax,%eax
4b: 75 db jne 28 <pthread_spin_lock+0x28>
4d: 83 c4 10 add $0x10,%esp
50: 31 c0 xor %eax,%eax
52: 5b pop %ebx
53: 5e pop %esi
54: 5f pop %edi
55: c3 ret
So I am not sure we should be throwing ASM out just yet.
@@ -30,10 +30,17 @@
.type C_SYMBOL_NAME(_mcount), @function
.align ALIGNARG(4)
C_LABEL(_mcount)
+ cfi_startproc
/* Save the caller-clobbered registers. */
pushl %eax
+ cfi_adjust_cfa_offset (4)
pushl %ecx
+ cfi_adjust_cfa_offset (4)
pushl %edx
+ cfi_adjust_cfa_offset (4)
+ cfi_rel_offset (eax, 8)
+ cfi_rel_offset (ecx, 4)
+ cfi_rel_offset (edx, 0)
movl 12(%esp), %edx
movl 4(%ebp), %eax
@@ -45,9 +52,16 @@ C_LABEL(_mcount)
/* Pop the saved registers. Please note that `mcount' has no
return value. */
popl %edx
+ cfi_adjust_cfa_offset (-4)
+ cfi_restore (edx)
popl %ecx
+ cfi_adjust_cfa_offset (-4)
+ cfi_restore (ecx)
popl %eax
+ cfi_adjust_cfa_offset (-4)
+ cfi_restore (eax)
ret
+ cfi_endproc
ASM_SIZE_DIRECTIVE(C_SYMBOL_NAME(_mcount))
#undef mcount
@@ -58,10 +72,17 @@ weak_alias (_mcount, mcount)
.type C_SYMBOL_NAME(__fentry__), @function
.align ALIGNARG(4)
C_LABEL(__fentry__)
+ cfi_startproc
/* Save the caller-clobbered registers. */
pushl %eax
+ cfi_adjust_cfa_offset (4)
pushl %ecx
+ cfi_adjust_cfa_offset (4)
pushl %edx
+ cfi_adjust_cfa_offset (4)
+ cfi_rel_offset (eax, 8)
+ cfi_rel_offset (ecx, 4)
+ cfi_rel_offset (edx, 0)
movl 12(%esp), %edx
movl 16(%esp), %eax
@@ -73,7 +94,14 @@ C_LABEL(__fentry__)
/* Pop the saved registers. Please note that `__fentry__' has no
return value. */
popl %edx
+ cfi_adjust_cfa_offset (-4)
+ cfi_restore (edx)
popl %ecx
+ cfi_adjust_cfa_offset (-4)
+ cfi_restore (ecx)
popl %eax
+ cfi_adjust_cfa_offset (-4)
+ cfi_restore (eax)
ret
+ cfi_endproc
ASM_SIZE_DIRECTIVE(C_SYMBOL_NAME(__fentry__))
@@ -16,11 +16,9 @@
<http://www.gnu.org/licenses/>. */
#include <lowlevellock.h>
+#include <sysdep.h>
- .globl pthread_spin_lock
- .type pthread_spin_lock,@function
- .align 16
-pthread_spin_lock:
+ENTRY(pthread_spin_lock)
mov 4(%esp), %eax
1: LOCK
decl 0(%eax)
@@ -34,4 +32,4 @@ pthread_spin_lock:
cmpl $0, 0(%eax)
jg 1b
jmp 2b
- .size pthread_spin_lock,.-pthread_spin_lock
+END(pthread_spin_lock)
@@ -16,15 +16,14 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
- .globl pthread_spin_unlock
- .type pthread_spin_unlock,@function
- .align 16
-pthread_spin_unlock:
+#include <sysdep.h>
+
+ENTRY(pthread_spin_unlock)
movl 4(%esp), %eax
movl $1, (%eax)
xorl %eax, %eax
ret
- .size pthread_spin_unlock,.-pthread_spin_unlock
+END(pthread_spin_unlock)
/* The implementation of pthread_spin_init is identical. */
.globl pthread_spin_init
@@ -17,7 +17,7 @@
<http://www.gnu.org/licenses/>. */
#include <pthread-errnos.h>
-
+#include <sysdep.h>
#ifdef UP
# define LOCK
@@ -25,10 +25,7 @@
# define LOCK lock
#endif
- .globl pthread_spin_trylock
- .type pthread_spin_trylock,@function
- .align 16
-pthread_spin_trylock:
+ENTRY(pthread_spin_trylock)
movl 4(%esp), %edx
movl $1, %eax
xorl %ecx, %ecx
@@ -43,4 +40,4 @@ pthread_spin_trylock:
0:
#endif
ret
- .size pthread_spin_trylock,.-pthread_spin_trylock
+END(pthread_spin_trylock)
@@ -17,10 +17,7 @@
#include <sysdep.h>
- .text
- .type _exit,@function
- .global _exit
-_exit:
+ENTRY(_exit)
movl 4(%esp), %ebx
/* Try the new syscall first. */
@@ -37,7 +34,7 @@ _exit:
/* This must not fail. Be sure we don't return. */
hlt
- .size _exit,.-_exit
+END(_exit)
libc_hidden_def (_exit)
rtld_hidden_def (_exit)