[1/3] csu: randomize location of TCB
Commit Message
Use mmap() for allocating TCB except if instructed by tunable
glibc.malloc.use_sbrk. This makes the location of TCB random instead
of always staying predictably next to data segment. When using mmap(),
improve the logic so that allocation of TCB can be assumed to fail
instead of segfaulting.
--
v2: introduce a tunable to use sbrk()
Signed-off-by: Topi Miettinen <toiwoton@gmail.com>
---
csu/libc-tls.c | 48 +++++++++++++++++++++----
elf/dl-tunables.list | 7 ++++
manual/tunables.texi | 5 +++
sysdeps/unix/sysv/linux/mmap64.c | 19 ++++++++++
sysdeps/unix/sysv/linux/mmap_internal.h | 5 +++
5 files changed, 78 insertions(+), 6 deletions(-)
Comments
On 25/11/2020 08:36, Topi Miettinen via Libc-alpha wrote:
> Use mmap() for allocating TCB except if instructed by tunable
> glibc.malloc.use_sbrk. This makes the location of TCB random instead
> of always staying predictably next to data segment. When using mmap(),
> improve the logic so that allocation of TCB can be assumed to fail
> insted of segfaulting.
>
> --
> v2: introduce a tunable to use sbrk()
>
> Signed-off-by: Topi Miettinen <toiwoton@gmail.com>
Could you address the points I raised on your first version [1]
first?
[1] https://sourceware.org/pipermail/libc-alpha/2020-November/119988.html
On 25.11.2020 15.18, Adhemerval Zanella wrote:
>
>
> On 25/11/2020 08:36, Topi Miettinen via Libc-alpha wrote:
>> Use mmap() for allocating TCB except if instructed by tunable
>> glibc.malloc.use_sbrk. This makes the location of TCB random instead
>> of always staying predictably next to data segment. When using mmap(),
>> improve the logic so that allocation of TCB can be assumed to fail
>> insted of segfaulting.
>>
>> --
>> v2: introduce a tunable to use sbrk()
>>
>> Signed-off-by: Topi Miettinen <toiwoton@gmail.com>
>
> Could you address the points I raised on your first version [1]
> first?
>
> [1] https://sourceware.org/pipermail/libc-alpha/2020-November/119988.html
Sorry, I'm not subscribed to the list, so I missed your review. I'll
refactor mmap() and __mmap_internal().
-Topi
On 25.11.2020 13.36, Topi Miettinen wrote:
> Use mmap() for allocating TCB except if instructed by tunable
> glibc.malloc.use_sbrk. This makes the location of TCB random instead
> of always staying predictably next to data segment. When using mmap(),
> improve the logic so that allocation of TCB can be assumed to fail
> insted of segfaulting.
>
> --
> v2: introduce a tunable to use sbrk()
I missed that tunables parsing uses sbrk() to allocate memory for a copy
of GLIBC_TUNABLES environment variable in tunables_strdup(). If the
variable is set, but kernel returns ENOSYS for brk() system call, every
command immediately fails:
$ GLIBC_TUNABLES=glibc.malloc.use_sbrk=1 ls
sbrk() failure while processing tunables
Some ideas:
- replace sbrk() with mmap(): up to 4095 bytes may be lost
- directly modify the environment variable, assuming that it will only
be shortened
-Topi
@@ -25,11 +25,21 @@
#include <sys/param.h>
#include <array_length.h>
#include <list.h>
+#include <sys/mman.h>
+#include <sysdep.h>
+
+#define HAVE_MMAP_INTERNAL 0
+#include <mmap_internal.h>
#ifdef SHARED
#error makefile bug, this file is for static only
#endif
+#if HAVE_TUNABLES
+# define TUNABLE_NAMESPACE malloc
+#endif
+#include <elf/dl-tunables.h>
+
dtv_t _dl_static_dtv[2 + TLS_SLOTINFO_SURPLUS];
@@ -135,25 +145,51 @@ __libc_setup_tls (void)
/* We have to set up the TCB block which also (possibly) contains
'errno'. Therefore we avoid 'malloc' which might touch 'errno'.
- Instead we use 'sbrk' which would only uses 'errno' if it fails.
- In this case we are right away out of memory and the user gets
- what she/he deserves. */
+ Instead we use '__mmap_internal' (when available) which does not
+ use 'errno', except if instructed by tunable
+ glibc.malloc.use_sbrk to use 'sbrk()' instead. If 'sbrk()' fails,
+ it will access 'errno' with catastrophic results. */
+
+  int error = 0;
+#if HAVE_MMAP_INTERNAL
+  int use_sbrk = 0;
+# if HAVE_TUNABLES
+  use_sbrk = TUNABLE_GET (use_sbrk, int32_t, NULL);
+# endif
+#endif
+
#if TLS_TCB_AT_TP
/* Align the TCB offset to the maximum alignment, as
_dl_allocate_tls_storage (in elf/dl-tls.c) does using __libc_memalign
and dl_tls_static_align. */
tcb_offset = roundup (memsz + GLRO(dl_tls_static_surplus), max_align);
- tlsblock = __sbrk (tcb_offset + TLS_INIT_TCB_SIZE + max_align);
+ size_t tlsblock_size = tcb_offset + TLS_INIT_TCB_SIZE + max_align;
+#if HAVE_MMAP_INTERNAL
+ if (!use_sbrk)
+ tlsblock = __mmap_internal (NULL, tlsblock_size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0, &error);
+ else
+#endif
+ tlsblock = __sbrk (tlsblock_size);
+
#elif TLS_DTV_AT_TP
tcb_offset = roundup (TLS_INIT_TCB_SIZE, align ?: 1);
- tlsblock = __sbrk (tcb_offset + memsz + max_align
- + TLS_PRE_TCB_SIZE + GLRO(dl_tls_static_surplus));
+ size_t tlsblock_size = tcb_offset + memsz + max_align
+ + TLS_PRE_TCB_SIZE + GLRO(dl_tls_static_surplus);
+#if HAVE_MMAP_INTERNAL
+ if (!use_sbrk)
+ tlsblock = __mmap_internal (NULL, tlsblock_size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0, &error);
+ else
+#endif
+ tlsblock = __sbrk (tlsblock_size);
+
tlsblock += TLS_PRE_TCB_SIZE;
#else
/* In case a model with a different layout for the TCB and DTV
is defined add another #elif here and in the following #ifs. */
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
+ if (error)
+ _startup_fatal ("Cannot allocate TCB");
/* Align the TLS block. */
tlsblock = (void *) (((uintptr_t) tlsblock + max_align - 1)
@@ -91,6 +91,13 @@ glibc {
minval: 0
security_level: SXID_IGNORE
}
+ use_sbrk {
+ type: INT_32
+ minval: 0
+ maxval: 1
+ default: 0
+ security_level: SXID_IGNORE
+ }
}
cpu {
hwcap_mask {
@@ -227,6 +227,11 @@ pointer, so add 4 on 32-bit systems or 8 on 64-bit systems to the size
passed to @code{malloc} for the largest bin size to enable.
@end deftp
+@deftp Tunable glibc.malloc.use_sbrk
+A value of 1 instructs @theglibc{} to use @code{sbrk()} for memory
+allocation instead of @code{mmap()}.
+@end deftp
+
@node Dynamic Linking Tunables
@section Dynamic Linking Tunables
@cindex dynamic linking tunables
@@ -67,3 +67,22 @@ weak_alias (__mmap64, mmap)
weak_alias (__mmap64, __mmap)
libc_hidden_def (__mmap)
#endif
+
+void *
+__mmap_internal (void *addr, size_t len, int prot, int flags, int fd, off64_t offset, int *error)
+{
+ unsigned long int ret;
+#ifdef __NR_mmap2
+ ret = INTERNAL_SYSCALL_CALL (mmap2, addr, len, prot, flags, fd,
+ (off_t) (offset / MMAP2_PAGE_UNIT));
+#else
+ ret = INTERNAL_SYSCALL_CALL (mmap, addr, len, prot, flags, fd, offset);
+#endif
+ if (INTERNAL_SYSCALL_ERROR_P(ret))
+ {
+ *error = ret;
+ return MAP_FAILED;
+ }
+
+ return (void *) ret;
+}
@@ -46,4 +46,9 @@ static uint64_t page_unit;
INLINE_SYSCALL_CALL (__nr, __addr, __len, __prot, __flags, __fd, __offset)
#endif
+#undef HAVE_MMAP_INTERNAL
+#define HAVE_MMAP_INTERNAL 1
+/* Internal version of mmap() which does not set errno.  */
+void *__mmap_internal (void *addr, size_t len, int prot, int flags, int fd, off64_t offset, int *error);
+
#endif /* MMAP_INTERNAL_LINUX_H */