[v5,7/7] malloc: Enable huge page support on main arena
Checks
Context |
Check |
Description |
dj/TryBot-apply_patch |
success
|
Patch applied to master at the time it was sent
|
dj/TryBot-32bit |
success
|
Build for i686
|
Commit Message
This patch adds huge page support to main arena allocation,
enabled with the tunable glibc.malloc.hugetlb=2. The patch essentially
disables the __glibc_morecore() sbrk() call (similar to what memory
tagging does when sbrk() is not supported) and falls back to the
default page size if the memory allocation fails.
Checked on x86_64-linux-gnu.
---
malloc/arena.c | 4 ++++
malloc/malloc.c | 12 ++++++++++--
malloc/morecore.c | 4 ----
3 files changed, 14 insertions(+), 6 deletions(-)
Comments
LGTM
Reviewed-by: DJ Delorie <dj@redhat.com>
Adhemerval Zanella via Libc-alpha <libc-alpha@sourceware.org> writes:
> diff --git a/malloc/arena.c b/malloc/arena.c
> # endif
> TUNABLE_GET (mxfast, size_t, TUNABLE_CALLBACK (set_mxfast));
> TUNABLE_GET (hugetlb, int32_t, TUNABLE_CALLBACK (set_hugetlb));
> + if (mp_.hp_pagesize > 0)
> + /* Force mmap for main arena instead of sbrk, so hugepages are explicitly
> + used. */
> + __always_fail_morecore = true;
Ok.
> diff --git a/malloc/malloc.c b/malloc/malloc.c
> */
>
> - char *mbrk = sysmalloc_mmap_fallback (&size, nb, old_size, pagesize,
> - MMAP_AS_MORECORE_SIZE, 0, av);
> + char *mbrk = MAP_FAILED;
> +#if HAVE_TUNABLES
> + if (mp_.hp_pagesize > 0)
> + mbrk = sysmalloc_mmap_fallback (&size, nb, old_size,
> + mp_.hp_pagesize, mp_.hp_pagesize,
> + mp_.hp_flags, av);
> +#endif
> + if (mbrk == MAP_FAILED)
> + mbrk = sysmalloc_mmap_fallback (&size, nb, old_size, pagesize,
> + MMAP_AS_MORECORE_SIZE, 0, av);
Ok.
> diff --git a/malloc/morecore.c b/malloc/morecore.c
> License along with the GNU C Library; if not, see
> <https://www.gnu.org/licenses/>. */
>
> -#if defined(SHARED) || defined(USE_MTAG)
> static bool __always_fail_morecore = false;
> -#endif
Ok.
> @@ -25,10 +23,8 @@ static bool __always_fail_morecore = false;
> void *
> __glibc_morecore (ptrdiff_t increment)
> {
> -#if defined(SHARED) || defined(USE_MTAG)
> if (__always_fail_morecore)
> return NULL;
> -#endif
Ok.
@@ -364,6 +364,10 @@ ptmalloc_init (void)
# endif
TUNABLE_GET (mxfast, size_t, TUNABLE_CALLBACK (set_mxfast));
TUNABLE_GET (hugetlb, int32_t, TUNABLE_CALLBACK (set_hugetlb));
+ if (mp_.hp_pagesize > 0)
+ /* Force mmap for main arena instead of sbrk, so hugepages are explicitly
+ used. */
+ __always_fail_morecore = true;
#else
if (__glibc_likely (_environ != NULL))
{
@@ -2740,8 +2740,16 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
segregated mmap region.
*/
- char *mbrk = sysmalloc_mmap_fallback (&size, nb, old_size, pagesize,
- MMAP_AS_MORECORE_SIZE, 0, av);
+ char *mbrk = MAP_FAILED;
+#if HAVE_TUNABLES
+ if (mp_.hp_pagesize > 0)
+ mbrk = sysmalloc_mmap_fallback (&size, nb, old_size,
+ mp_.hp_pagesize, mp_.hp_pagesize,
+ mp_.hp_flags, av);
+#endif
+ if (mbrk == MAP_FAILED)
+ mbrk = sysmalloc_mmap_fallback (&size, nb, old_size, pagesize,
+ MMAP_AS_MORECORE_SIZE, 0, av);
if (mbrk != MAP_FAILED)
{
/* We do not need, and cannot use, another sbrk call to find end */
@@ -15,9 +15,7 @@
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
-#if defined(SHARED) || defined(USE_MTAG)
static bool __always_fail_morecore = false;
-#endif
/* Allocate INCREMENT more bytes of data space,
and return the start of data space, or NULL on errors.
@@ -25,10 +23,8 @@ static bool __always_fail_morecore = false;
void *
__glibc_morecore (ptrdiff_t increment)
{
-#if defined(SHARED) || defined(USE_MTAG)
if (__always_fail_morecore)
return NULL;
-#endif
void *result = (void *) __sbrk (increment);
if (result == (void *) -1)