[v6,1/5] support: Add support_stack_alloc

Message ID 20210623185115.395812-2-adhemerval.zanella@linaro.org
State Superseded
Headers
Series Add close_range, closefrom, and posix_spawn_file_actions_closefrom_np |

Checks

Context Check Description
dj/TryBot-apply_patch success Patch applied to master at the time it was sent

Commit Message

Adhemerval Zanella June 23, 2021, 6:51 p.m. UTC
  The stack allocation code in xsigstack is refactored into a helper so it
can be reused more generically.
---
 support/Makefile              |  1 +
 support/support.h             | 19 +++++++++
 support/support_stack_alloc.c | 76 +++++++++++++++++++++++++++++++++++
 support/xsigstack.c           | 43 +++-----------------
 4 files changed, 101 insertions(+), 38 deletions(-)
 create mode 100644 support/support_stack_alloc.c
  

Comments

Florian Weimer June 24, 2021, 9:15 a.m. UTC | #1
* Adhemerval Zanella via Libc-alpha:

> diff --git a/support/support_stack_alloc.c b/support/support_stack_alloc.c
> new file mode 100644
> index 0000000000..08323f43d5
> --- /dev/null
> +++ b/support/support_stack_alloc.c
> @@ -0,0 +1,76 @@

> +  /* The guard bands need to be large enough to intercept offset
> +     accesses from a stack address that might otherwise hit another
> +     mapping.  Make them at least twice as big as the stack itself, to
> +     defend against an offset by the entire size of a large
> +     stack-allocated array.  The minimum is 1MiB, which is arbitrarily
> +     chosen to be larger than any "typical" wild pointer offset.
> +     Again, no matter what the number is, round it up to a whole
> +     number of pages.  */
> +  size_t guardsize = roundup (MAX (2 * stacksize, 1024 * 1024), pagesize);
> +  size_t alloc_size = guardsize + stacksize + guardsize;
> +  /* Use MAP_NORESERVE so that RAM will not be wasted on the guard
> +     bands; touch all the pages of the actual stack before returning,
> +     so we know they are allocated.  */
> +  void *alloc_base = xmmap (0,
> +                            alloc_size,
> +                            PROT_NONE,
> +                            MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE|MAP_STACK,
> +                            -1);
> +  xmprotect (alloc_base + guardsize, stacksize, PROT_READ | PROT_WRITE);
> +  memset (alloc_base + guardsize, 0xA5, stacksize);
> +  return (struct support_stack) { alloc_base + guardsize,
> +                                  stacksize, guardsize };
> +}

Missing _STACK_GROWS_DOWN/_STACK_GROWS_UP support for guard location
handling, and missing executable stack handling (in case it's needed on
Hurd for trampolines; I'm not sure what the current state there is).

But I see it was already missing, so maybe that's not a big deal.

Thanks,
Florian
  
Adhemerval Zanella June 24, 2021, 11:33 a.m. UTC | #2
On 24/06/2021 06:15, Florian Weimer wrote:
> * Adhemerval Zanella via Libc-alpha:
> 
>> diff --git a/support/support_stack_alloc.c b/support/support_stack_alloc.c
>> new file mode 100644
>> index 0000000000..08323f43d5
>> --- /dev/null
>> +++ b/support/support_stack_alloc.c
>> @@ -0,0 +1,76 @@
> 
>> +  /* The guard bands need to be large enough to intercept offset
>> +     accesses from a stack address that might otherwise hit another
>> +     mapping.  Make them at least twice as big as the stack itself, to
>> +     defend against an offset by the entire size of a large
>> +     stack-allocated array.  The minimum is 1MiB, which is arbitrarily
>> +     chosen to be larger than any "typical" wild pointer offset.
>> +     Again, no matter what the number is, round it up to a whole
>> +     number of pages.  */
>> +  size_t guardsize = roundup (MAX (2 * stacksize, 1024 * 1024), pagesize);
>> +  size_t alloc_size = guardsize + stacksize + guardsize;
>> +  /* Use MAP_NORESERVE so that RAM will not be wasted on the guard
>> +     bands; touch all the pages of the actual stack before returning,
>> +     so we know they are allocated.  */
>> +  void *alloc_base = xmmap (0,
>> +                            alloc_size,
>> +                            PROT_NONE,
>> +                            MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE|MAP_STACK,
>> +                            -1);
>> +  xmprotect (alloc_base + guardsize, stacksize, PROT_READ | PROT_WRITE);
>> +  memset (alloc_base + guardsize, 0xA5, stacksize);
>> +  return (struct support_stack) { alloc_base + guardsize,
>> +                                  stacksize, guardsize };
>> +}
> 
> Missing _STACK_GROWS_DOWN/_STACK_GROWS_UP support for guard location
> handling, and missing executable stack handling (in case it's needed on
> Hurd for trampolines; I'm not sure what the current state there is).
> 
> But I see it was already missing, so maybe that's not a big deal.

It seems a worthwhile addition; I will update the patch.
  

Patch

diff --git a/support/Makefile b/support/Makefile
index 278f4627d8..f5410545a1 100644
--- a/support/Makefile
+++ b/support/Makefile
@@ -39,6 +39,7 @@  libsupport-routines = \
   resolv_response_context_free \
   resolv_test \
   set_fortify_handler \
+  support_stack_alloc \
   support-xfstat \
   support-xfstat-time64 \
   support-xstat \
diff --git a/support/support.h b/support/support.h
index 9ec8ecb8d7..dbd270c78d 100644
--- a/support/support.h
+++ b/support/support.h
@@ -164,6 +164,25 @@  timer_t support_create_timer (uint64_t sec, long int nsec, bool repeat,
 /* Disable the timer TIMER.  */
 void support_delete_timer (timer_t timer);
 
+struct support_stack
+{
+  void *stack;
+  size_t size;
+  size_t guardsize;
+};
+
+/* Allocate a stack suitable to be used with xclone or a sigaltstack call.
+   The stack will have a minimum size of SIZE + MINSIGSTKSZ bytes, rounded up
+   to a whole number of pages.  There will be large (at least 1 MiB)
+   inaccessible guard bands on either side of it.
+   The returned STACK member points to the usable stack region of SIZE bytes,
+   excluding the guard areas of GUARDSIZE bytes on either side.
+   The process is terminated on error.  */
+struct support_stack support_stack_alloc (size_t size);
+
+/* Deallocate the STACK.  */
+void support_stack_free (struct support_stack *stack);
+
 __END_DECLS
 
 #endif /* SUPPORT_H */
diff --git a/support/support_stack_alloc.c b/support/support_stack_alloc.c
new file mode 100644
index 0000000000..08323f43d5
--- /dev/null
+++ b/support/support_stack_alloc.c
@@ -0,0 +1,76 @@ 
+/* Allocate a stack suitable to be used with xclone or xsigaltstack.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <support/check.h>
+#include <support/support.h>
+#include <support/xunistd.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/param.h> /* roundup, MAX  */
+
+#ifndef MAP_NORESERVE
+# define MAP_NORESERVE 0
+#endif
+#ifndef MAP_STACK
+# define MAP_STACK 0
+#endif
+
+struct support_stack
+support_stack_alloc (size_t size)
+{
+  size_t pagesize = sysconf (_SC_PAGESIZE);
+  if (pagesize == -1)
+    FAIL_EXIT1 ("sysconf (_SC_PAGESIZE): %m\n");
+
+  /* Always supply at least MINSIGSTKSZ space; passing 0 as size means
+     only that much space.  No matter what the number is, round it up
+     to a whole number of pages.  */
+  size_t stacksize = roundup (size + MINSIGSTKSZ, pagesize);
+
+  /* The guard bands need to be large enough to intercept offset
+     accesses from a stack address that might otherwise hit another
+     mapping.  Make them at least twice as big as the stack itself, to
+     defend against an offset by the entire size of a large
+     stack-allocated array.  The minimum is 1MiB, which is arbitrarily
+     chosen to be larger than any "typical" wild pointer offset.
+     Again, no matter what the number is, round it up to a whole
+     number of pages.  */
+  size_t guardsize = roundup (MAX (2 * stacksize, 1024 * 1024), pagesize);
+  size_t alloc_size = guardsize + stacksize + guardsize;
+  /* Use MAP_NORESERVE so that RAM will not be wasted on the guard
+     bands; touch all the pages of the actual stack before returning,
+     so we know they are allocated.  */
+  void *alloc_base = xmmap (0,
+                            alloc_size,
+                            PROT_NONE,
+                            MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE|MAP_STACK,
+                            -1);
+  xmprotect (alloc_base + guardsize, stacksize, PROT_READ | PROT_WRITE);
+  memset (alloc_base + guardsize, 0xA5, stacksize);
+  return (struct support_stack) { alloc_base + guardsize,
+                                  stacksize, guardsize };
+}
+
+void
+support_stack_free (struct support_stack *stack)
+{
+  void *alloc_base = (void *)((uintptr_t) stack->stack - stack->guardsize);
+  size_t alloc_size = stack->size + 2 * stack->guardsize;
+  xmunmap (alloc_base, alloc_size);
+}
diff --git a/support/xsigstack.c b/support/xsigstack.c
index a2f0e3269a..a471c853cb 100644
--- a/support/xsigstack.c
+++ b/support/xsigstack.c
@@ -37,8 +37,7 @@ 
    structures.  */
 struct sigstack_desc
 {
-  void *alloc_base;  /* Base address of the complete allocation.  */
-  size_t alloc_size; /* Size of the complete allocation.  */
+  struct support_stack stack;
   stack_t alt_stack; /* The address and size of the stack itself.  */
   stack_t old_stack; /* The previous signal stack.  */
 };
@@ -46,43 +45,11 @@  struct sigstack_desc
 void *
 xalloc_sigstack (size_t size)
 {
-  size_t pagesize = sysconf (_SC_PAGESIZE);
-  if (pagesize == -1)
-    FAIL_EXIT1 ("sysconf (_SC_PAGESIZE): %m\n");
-
-  /* Always supply at least MINSIGSTKSZ space; passing 0 as size means
-     only that much space.  No matter what the number is, round it up
-     to a whole number of pages.  */
-  size_t stacksize = roundup (size + MINSIGSTKSZ, pagesize);
-
-  /* The guard bands need to be large enough to intercept offset
-     accesses from a stack address that might otherwise hit another
-     mapping.  Make them at least twice as big as the stack itself, to
-     defend against an offset by the entire size of a large
-     stack-allocated array.  The minimum is 1MiB, which is arbitrarily
-     chosen to be larger than any "typical" wild pointer offset.
-     Again, no matter what the number is, round it up to a whole
-     number of pages.  */
-  size_t guardsize = roundup (MAX (2 * stacksize, 1024 * 1024), pagesize);
-
   struct sigstack_desc *desc = xmalloc (sizeof (struct sigstack_desc));
-  desc->alloc_size = guardsize + stacksize + guardsize;
-  /* Use MAP_NORESERVE so that RAM will not be wasted on the guard
-     bands; touch all the pages of the actual stack before returning,
-     so we know they are allocated.  */
-  desc->alloc_base = xmmap (0,
-                            desc->alloc_size,
-                            PROT_READ|PROT_WRITE,
-                            MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE|MAP_STACK,
-                            -1);
-
-  xmprotect (desc->alloc_base, guardsize, PROT_NONE);
-  xmprotect (desc->alloc_base + guardsize + stacksize, guardsize, PROT_NONE);
-  memset (desc->alloc_base + guardsize, 0xA5, stacksize);
-
-  desc->alt_stack.ss_sp    = desc->alloc_base + guardsize;
+  desc->stack = support_stack_alloc (size);
+  desc->alt_stack.ss_sp    = desc->stack.stack;
   desc->alt_stack.ss_flags = 0;
-  desc->alt_stack.ss_size  = stacksize;
+  desc->alt_stack.ss_size  = desc->stack.size;
 
   if (sigaltstack (&desc->alt_stack, &desc->old_stack))
     FAIL_EXIT1 ("sigaltstack (new stack: sp=%p, size=%zu, flags=%u): %m\n",
@@ -101,7 +68,7 @@  xfree_sigstack (void *stack)
     FAIL_EXIT1 ("sigaltstack (restore old stack: sp=%p, size=%zu, flags=%u): "
                 "%m\n", desc->old_stack.ss_sp, desc->old_stack.ss_size,
                 desc->old_stack.ss_flags);
-  xmunmap (desc->alloc_base, desc->alloc_size);
+  support_stack_free (&desc->stack);
   free (desc);
 }