[v4,1/5] support: Add support_stack_alloc
Commit Message
The stack allocation code in xsigstack.c is refactored into a more
generic interface so it can also be used for other stacks (e.g. xclone).
---
support/Makefile | 1 +
support/support.h | 20 +++++++++
support/support_stack_alloc.c | 78 +++++++++++++++++++++++++++++++++++
support/xsigstack.c | 43 +++----------------
4 files changed, 104 insertions(+), 38 deletions(-)
create mode 100644 support/support_stack_alloc.c
Comments
* Adhemerval Zanella via Libc-alpha:
> + /* Use MAP_NORESERVE so that RAM will not be wasted on the guard
> + bands; touch all the pages of the actual stack before returning,
> + so we know they are allocated. */
> + void *alloc_base = xmmap (0,
> + alloc_size,
> + PROT_READ|PROT_WRITE,
> + MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE|MAP_STACK,
> + -1);
> + xmprotect (alloc_base, guardsize, PROT_NONE);
> + xmprotect (alloc_base + guardsize + stacksize, guardsize, PROT_NONE);
The usual pattern is to map with PROT_NONE and then use
PROT_READ|PROT_WRITE with mprotect.
Rest looks okay, thanks.
Florian
On 10/03/2021 13:10, Florian Weimer wrote:
> * Adhemerval Zanella via Libc-alpha:
>
>> + /* Use MAP_NORESERVE so that RAM will not be wasted on the guard
>> + bands; touch all the pages of the actual stack before returning,
>> + so we know they are allocated. */
>> + void *alloc_base = xmmap (0,
>> + alloc_size,
>> + PROT_READ|PROT_WRITE,
>> + MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE|MAP_STACK,
>> + -1);
>> + xmprotect (alloc_base, guardsize, PROT_NONE);
>> + xmprotect (alloc_base + guardsize + stacksize, guardsize, PROT_NONE);
>
> The usual pattern is to map with PROT_NONE and then use
> PROT_READ|PROT_WRITE with mprotect.
>
> Rest looks okay, thanks.
>
> Florian
>
Ok, I will fix it and push upstream.
* Adhemerval Zanella:
> On 10/03/2021 13:10, Florian Weimer wrote:
>> * Adhemerval Zanella via Libc-alpha:
>>
>>> + /* Use MAP_NORESERVE so that RAM will not be wasted on the guard
>>> + bands; touch all the pages of the actual stack before returning,
>>> + so we know they are allocated. */
>>> + void *alloc_base = xmmap (0,
>>> + alloc_size,
>>> + PROT_READ|PROT_WRITE,
>>> + MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE|MAP_STACK,
>>> + -1);
>>> + xmprotect (alloc_base, guardsize, PROT_NONE);
>>> + xmprotect (alloc_base + guardsize + stacksize, guardsize, PROT_NONE);
>>
>> The usual pattern is to map with PROT_NONE and then use
>> PROT_READ|PROT_WRITE with mprotect.
>>
>> Rest looks okay, thanks.
>>
>> Florian
>>
>
> Ok, I will fix it and push upstream.
Just to be clear, it avoids the need for MAP_NORESERVE.
Thanks,
Florian
On 12/03/2021 10:35, Florian Weimer wrote:
> * Adhemerval Zanella:
>
>> On 10/03/2021 13:10, Florian Weimer wrote:
>>> * Adhemerval Zanella via Libc-alpha:
>>>
>>>> + /* Use MAP_NORESERVE so that RAM will not be wasted on the guard
>>>> + bands; touch all the pages of the actual stack before returning,
>>>> + so we know they are allocated. */
>>>> + void *alloc_base = xmmap (0,
>>>> + alloc_size,
>>>> + PROT_READ|PROT_WRITE,
>>>> + MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE|MAP_STACK,
>>>> + -1);
>>>> + xmprotect (alloc_base, guardsize, PROT_NONE);
>>>> + xmprotect (alloc_base + guardsize + stacksize, guardsize, PROT_NONE);
>>>
>>> The usual pattern is to map with PROT_NONE and then use
>>> PROT_READ|PROT_WRITE with mprotect.
>>>
>>> Rest looks okay, thanks.
>>>
>>> Florian
>>>
>>
>> Ok, I will fix it and push upstream.
>
> Just to be clear, it avoids the need for MAP_NORESERVE.
Right, I will fix it.
@@ -39,6 +39,7 @@ libsupport-routines = \
resolv_response_context_free \
resolv_test \
set_fortify_handler \
+ support_stack_alloc \
support-xfstat \
support-xstat \
support_become_root \
@@ -129,6 +129,26 @@ extern void support_copy_file (const char *from, const char *to);
extern ssize_t support_copy_file_range (int, off64_t *, int, off64_t *,
size_t, unsigned int);
+
+struct support_stack
+{
+  void *stack;      /* Lowest address of the usable stack region.  */
+  size_t size;      /* Usable size of the stack region, in bytes.  */
+  size_t guardsize; /* Size of each inaccessible guard band, in bytes.  */
+};
+
+/* Allocate a stack suitable to be used with xclone or with the
+   sigaltstack call.  The stack will have a minimum size of
+   SIZE + MINSIGSTKSZ bytes, rounded up to a whole number of pages.
+   There will be large (at least 1 MiB) inaccessible guard bands on
+   either side of it.  The returned STACK and SIZE fields describe the
+   usable stack region, excluding the guard areas.
+   The process is terminated on allocation failure.  */
+struct support_stack support_stack_alloc (size_t size);
+
+/* Deallocate the STACK. */
+void support_stack_free (struct support_stack *stack);
+
__END_DECLS
#endif /* SUPPORT_H */
new file mode 100644
@@ -0,0 +1,78 @@
+/* Allocate a stack suitable to be used with xclone or sigaltstack.
+ Copyright (C) 2021 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <support/check.h>
+#include <support/support.h>
+#include <support/xunistd.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/param.h> /* roundup, MAX */
+
+#ifndef MAP_NORESERVE
+# define MAP_NORESERVE 0
+#endif
+#ifndef MAP_STACK
+# define MAP_STACK 0
+#endif
+
+struct support_stack
+support_stack_alloc (size_t size)
+{
+  size_t pagesize = sysconf (_SC_PAGESIZE);
+  if (pagesize == -1)
+    FAIL_EXIT1 ("sysconf (_SC_PAGESIZE): %m\n");
+
+  /* Always supply at least MINSIGSTKSZ space; passing 0 as size means
+     only that much space.  No matter what the number is, round it up
+     to a whole number of pages.  */
+  size_t stacksize = roundup (size + MINSIGSTKSZ, pagesize);
+
+  /* The guard bands need to be large enough to intercept offset
+     accesses from a stack address that might otherwise hit another
+     mapping.  Make them at least twice as big as the stack itself, to
+     defend against an offset by the entire size of a large
+     stack-allocated array.  The minimum is 1MiB, which is arbitrarily
+     chosen to be larger than any "typical" wild pointer offset.
+     Again, no matter what the number is, round it up to a whole
+     number of pages.  */
+  size_t guardsize = roundup (MAX (2 * stacksize, 1024 * 1024), pagesize);
+  size_t alloc_size = guardsize + stacksize + guardsize;
+  /* Map the whole allocation with PROT_NONE and then make only the
+     stack itself readable and writable.  The guard bands are never
+     committed, so this avoids the need for MAP_NORESERVE; touch all
+     the pages of the actual stack before returning, so we know they
+     are allocated.  */
+  void *alloc_base = xmmap (0,
+                            alloc_size,
+                            PROT_NONE,
+                            MAP_PRIVATE|MAP_ANONYMOUS|MAP_STACK,
+                            -1);
+  xmprotect (alloc_base + guardsize, stacksize, PROT_READ|PROT_WRITE);
+  memset (alloc_base + guardsize, 0xA5, stacksize);
+  return (struct support_stack) { alloc_base + guardsize, stacksize,
+                                  guardsize };
+}
+
+/* Unmap the stack together with the guard bands on either side.  */
+void
+support_stack_free (struct support_stack *stack)
+{
+  uintptr_t base = (uintptr_t) stack->stack - stack->guardsize;
+  xmunmap ((void *) base, stack->size + 2 * stack->guardsize);
+}
@@ -37,8 +37,7 @@
structures. */
struct sigstack_desc
{
- void *alloc_base; /* Base address of the complete allocation. */
- size_t alloc_size; /* Size of the complete allocation. */
+ struct support_stack stack;
stack_t alt_stack; /* The address and size of the stack itself. */
stack_t old_stack; /* The previous signal stack. */
};
@@ -46,43 +45,11 @@ struct sigstack_desc
void *
xalloc_sigstack (size_t size)
{
- size_t pagesize = sysconf (_SC_PAGESIZE);
- if (pagesize == -1)
- FAIL_EXIT1 ("sysconf (_SC_PAGESIZE): %m\n");
-
- /* Always supply at least MINSIGSTKSZ space; passing 0 as size means
- only that much space. No matter what the number is, round it up
- to a whole number of pages. */
- size_t stacksize = roundup (size + MINSIGSTKSZ, pagesize);
-
- /* The guard bands need to be large enough to intercept offset
- accesses from a stack address that might otherwise hit another
- mapping. Make them at least twice as big as the stack itself, to
- defend against an offset by the entire size of a large
- stack-allocated array. The minimum is 1MiB, which is arbitrarily
- chosen to be larger than any "typical" wild pointer offset.
- Again, no matter what the number is, round it up to a whole
- number of pages. */
- size_t guardsize = roundup (MAX (2 * stacksize, 1024 * 1024), pagesize);
-
struct sigstack_desc *desc = xmalloc (sizeof (struct sigstack_desc));
- desc->alloc_size = guardsize + stacksize + guardsize;
- /* Use MAP_NORESERVE so that RAM will not be wasted on the guard
- bands; touch all the pages of the actual stack before returning,
- so we know they are allocated. */
- desc->alloc_base = xmmap (0,
- desc->alloc_size,
- PROT_READ|PROT_WRITE,
- MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE|MAP_STACK,
- -1);
-
- xmprotect (desc->alloc_base, guardsize, PROT_NONE);
- xmprotect (desc->alloc_base + guardsize + stacksize, guardsize, PROT_NONE);
- memset (desc->alloc_base + guardsize, 0xA5, stacksize);
-
- desc->alt_stack.ss_sp = desc->alloc_base + guardsize;
+ desc->stack = support_stack_alloc (size);
+ desc->alt_stack.ss_sp = desc->stack.stack;
desc->alt_stack.ss_flags = 0;
- desc->alt_stack.ss_size = stacksize;
+ desc->alt_stack.ss_size = desc->stack.size;
if (sigaltstack (&desc->alt_stack, &desc->old_stack))
FAIL_EXIT1 ("sigaltstack (new stack: sp=%p, size=%zu, flags=%u): %m\n",
@@ -101,7 +68,7 @@ xfree_sigstack (void *stack)
FAIL_EXIT1 ("sigaltstack (restore old stack: sp=%p, size=%zu, flags=%u): "
"%m\n", desc->old_stack.ss_sp, desc->old_stack.ss_size,
desc->old_stack.ss_flags);
- xmunmap (desc->alloc_base, desc->alloc_size);
+ support_stack_free (&desc->stack);
free (desc);
}