[v2,3/4] malloc: Move mmap logic to its own function

Message ID: 20210818142000.128752-4-adhemerval.zanella@linaro.org
State: Superseded
Series: malloc: Improve Huge Page support

Checks

Context                 Check     Description
dj/TryBot-apply_patch   success   Patch applied to master at the time it was sent

Commit Message

Adhemerval Zanella Netto Aug. 18, 2021, 2:19 p.m. UTC
So that it can be used with different page sizes and flags.
---
 malloc/malloc.c | 155 +++++++++++++++++++++++++-----------------------
 1 file changed, 82 insertions(+), 73 deletions(-)
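
For context, a rough sketch of the kind of caller this refactor is meant to enable later in the series is shown below. It is only an illustration: the hp_pagesize and hp_flags fields of mp_ are placeholders for whatever the huge-page patches actually introduce, not part of this patch.

/* Hypothetical follow-up caller (NOT part of this patch): try a huge-page
   mapping first and fall back to the regular page size.  mp_.hp_pagesize and
   mp_.hp_flags stand in for tunable-provided values such as 2 MiB and
   MAP_HUGETLB.  */
char *mm;
if (mp_.hp_pagesize > 0 && nb >= mp_.hp_pagesize)
  {
    mm = sysmalloc_mmap (nb, mp_.hp_pagesize, mp_.hp_flags, av);
    if (mm != MAP_FAILED)
      return mm;
  }
/* Regular path, equivalent to what this patch wires into sysmalloc.  */
mm = sysmalloc_mmap (nb, pagesize, 0, av);
if (mm != MAP_FAILED)
  return mm;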
  

Comments

Siddhesh Poyarekar Aug. 19, 2021, 12:47 a.m. UTC | #1
On 8/18/21 7:49 PM, Adhemerval Zanella via Libc-alpha wrote:
> So it can be used with different pagesize and flags.
> ---
>   malloc/malloc.c | 155 +++++++++++++++++++++++++-----------------------
>   1 file changed, 82 insertions(+), 73 deletions(-)
> 
> diff --git a/malloc/malloc.c b/malloc/malloc.c
> index 1a2c798a35..4bfcea286f 100644
> --- a/malloc/malloc.c
> +++ b/malloc/malloc.c
> @@ -2414,6 +2414,85 @@ do_check_malloc_state (mstate av)
>      be extended or replaced.
>    */
>   
> +static void *
> +sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av)
> +{
> +  long int size;
> +
> +  /*
> +    Round up size to nearest page.  For mmapped chunks, the overhead is one
> +    SIZE_SZ unit larger than for normal chunks, because there is no
> +    following chunk whose prev_size field could be used.
> +
> +    See the front_misalign handling below, for glibc there is no need for
> +    further alignments unless we have have high alignment.
> +   */
> +  if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
> +    size = ALIGN_UP (nb + SIZE_SZ, pagesize);
> +  else
> +    size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
> +
> +  /* Don't try if size wraps around 0.  */
> +  if ((unsigned long) (size) <= (unsigned long) (nb))
> +    return MAP_FAILED;
> +
> +  char *mm = (char *) MMAP (0, size,
> +			    mtag_mmap_flags | PROT_READ | PROT_WRITE,
> +			    extra_flags);
> +  if (mm == MAP_FAILED)
> +    return mm;
> +
> +  sysmadvise_thp (mm, size);
> +
> +  /*
> +    The offset to the start of the mmapped region is stored in the prev_size
> +    field of the chunk.  This allows us to adjust returned start address to
> +    meet alignment requirements here and in memalign(), and still be able to
> +    compute proper address argument for later munmap in free() and realloc().
> +   */
> +
> +  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
> +
> +  if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
> +    {
> +      /* For glibc, chunk2mem increases the address by CHUNK_HDR_SZ and
> +	 MALLOC_ALIGN_MASK is CHUNK_HDR_SZ-1.  Each mmap'ed area is page
> +	 aligned and therefore definitely MALLOC_ALIGN_MASK-aligned.  */
> +      assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
> +      front_misalign = 0;
> +    }
> +  else
> +    front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
> +
> +  mchunkptr p;                    /* the allocated/returned chunk */
> +
> +  if (front_misalign > 0)
> +    {
> +      ptrdiff_t correction = MALLOC_ALIGNMENT - front_misalign;
> +      p = (mchunkptr) (mm + correction);
> +      set_prev_size (p, correction);
> +      set_head (p, (size - correction) | IS_MMAPPED);
> +    }
> +  else
> +    {
> +      p = (mchunkptr) mm;
> +      set_prev_size (p, 0);
> +      set_head (p, size | IS_MMAPPED);
> +    }
> +
> +  /* update statistics */
> +  int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
> +  atomic_max (&mp_.max_n_mmaps, new);
> +
> +  unsigned long sum;
> +  sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
> +  atomic_max (&mp_.max_mmapped_mem, sum);
> +
> +  check_chunk (av, p);
> +
> +  return chunk2mem (p);
> +}
> +
>   static void *
>   sysmalloc (INTERNAL_SIZE_T nb, mstate av)
>   {
> @@ -2451,81 +2530,11 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
>         || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
>   	  && (mp_.n_mmaps < mp_.n_mmaps_max)))
>       {
> -      char *mm;           /* return value from mmap call*/
> -
>       try_mmap:

This is a great opportunity to get rid of this goto.

> -      /*
> -         Round up size to nearest page.  For mmapped chunks, the overhead
> -         is one SIZE_SZ unit larger than for normal chunks, because there
> -         is no following chunk whose prev_size field could be used.
> -
> -         See the front_misalign handling below, for glibc there is no
> -         need for further alignments unless we have have high alignment.
> -       */
> -      if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
> -        size = ALIGN_UP (nb + SIZE_SZ, pagesize);
> -      else
> -        size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
> +      char *mm = sysmalloc_mmap (nb, pagesize, 0, av);
> +      if (mm != MAP_FAILED)
> +	return mm;
>         tried_mmap = true;
> -
> -      /* Don't try if size wraps around 0 */
> -      if ((unsigned long) (size) > (unsigned long) (nb))
> -        {
> -          mm = (char *) (MMAP (0, size,
> -			       mtag_mmap_flags | PROT_READ | PROT_WRITE, 0));
> -
> -          if (mm != MAP_FAILED)
> -            {
> -	      sysmadvise_thp (mm, size);
> -
> -              /*
> -                 The offset to the start of the mmapped region is stored
> -                 in the prev_size field of the chunk. This allows us to adjust
> -                 returned start address to meet alignment requirements here
> -                 and in memalign(), and still be able to compute proper
> -                 address argument for later munmap in free() and realloc().
> -               */
> -
> -              if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
> -                {
> -                  /* For glibc, chunk2mem increases the address by
> -                     CHUNK_HDR_SZ and MALLOC_ALIGN_MASK is
> -                     CHUNK_HDR_SZ-1.  Each mmap'ed area is page
> -                     aligned and therefore definitely
> -                     MALLOC_ALIGN_MASK-aligned.  */
> -                  assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
> -                  front_misalign = 0;
> -                }
> -              else
> -                front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
> -              if (front_misalign > 0)
> -                {
> -                  correction = MALLOC_ALIGNMENT - front_misalign;
> -                  p = (mchunkptr) (mm + correction);
> -		  set_prev_size (p, correction);
> -                  set_head (p, (size - correction) | IS_MMAPPED);
> -                }
> -              else
> -                {
> -                  p = (mchunkptr) mm;
> -		  set_prev_size (p, 0);
> -                  set_head (p, size | IS_MMAPPED);
> -                }
> -
> -              /* update statistics */
> -
> -              int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
> -              atomic_max (&mp_.max_n_mmaps, new);
> -
> -              unsigned long sum;
> -              sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
> -              atomic_max (&mp_.max_mmapped_mem, sum);
> -
> -              check_chunk (av, p);
> -
> -              return chunk2mem (p);
> -            }
> -        }
>       }
>   
>     /* There are no usable arenas and mmap also failed.  */
>
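
Regarding the goto remark above: one possible way to drop the try_mmap label, assuming its only other user is the !tried_mmap fallback taken when creating a new heap fails, would be to call the new helper directly at that point. A minimal sketch, not part of this patch:

/* Possible restructuring (sketch only): replace "goto try_mmap;" in the
   failed-heap path with a direct call to the new helper, so the label and
   the backward jump can be removed.  */
else if (!tried_mmap)
  {
    /* Arena/heap creation failed; fall back to a plain mmap of the request,
       which is exactly what the try_mmap path would have done.  */
    char *mm = sysmalloc_mmap (nb, pagesize, 0, av);
    if (mm != MAP_FAILED)
      return mm;
  }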
  

Patch

diff --git a/malloc/malloc.c b/malloc/malloc.c
index 1a2c798a35..4bfcea286f 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -2414,6 +2414,85 @@  do_check_malloc_state (mstate av)
    be extended or replaced.
  */
 
+static void *
+sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av)
+{
+  long int size;
+
+  /*
+    Round up size to nearest page.  For mmapped chunks, the overhead is one
+    SIZE_SZ unit larger than for normal chunks, because there is no
+    following chunk whose prev_size field could be used.
+
+    See the front_misalign handling below, for glibc there is no need for
+    further alignments unless we have have high alignment.
+   */
+  if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
+    size = ALIGN_UP (nb + SIZE_SZ, pagesize);
+  else
+    size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
+
+  /* Don't try if size wraps around 0.  */
+  if ((unsigned long) (size) <= (unsigned long) (nb))
+    return MAP_FAILED;
+
+  char *mm = (char *) MMAP (0, size,
+			    mtag_mmap_flags | PROT_READ | PROT_WRITE,
+			    extra_flags);
+  if (mm == MAP_FAILED)
+    return mm;
+
+  sysmadvise_thp (mm, size);
+
+  /*
+    The offset to the start of the mmapped region is stored in the prev_size
+    field of the chunk.  This allows us to adjust returned start address to
+    meet alignment requirements here and in memalign(), and still be able to
+    compute proper address argument for later munmap in free() and realloc().
+   */
+
+  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
+
+  if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
+    {
+      /* For glibc, chunk2mem increases the address by CHUNK_HDR_SZ and
+	 MALLOC_ALIGN_MASK is CHUNK_HDR_SZ-1.  Each mmap'ed area is page
+	 aligned and therefore definitely MALLOC_ALIGN_MASK-aligned.  */
+      assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
+      front_misalign = 0;
+    }
+  else
+    front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
+
+  mchunkptr p;                    /* the allocated/returned chunk */
+
+  if (front_misalign > 0)
+    {
+      ptrdiff_t correction = MALLOC_ALIGNMENT - front_misalign;
+      p = (mchunkptr) (mm + correction);
+      set_prev_size (p, correction);
+      set_head (p, (size - correction) | IS_MMAPPED);
+    }
+  else
+    {
+      p = (mchunkptr) mm;
+      set_prev_size (p, 0);
+      set_head (p, size | IS_MMAPPED);
+    }
+
+  /* update statistics */
+  int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
+  atomic_max (&mp_.max_n_mmaps, new);
+
+  unsigned long sum;
+  sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
+  atomic_max (&mp_.max_mmapped_mem, sum);
+
+  check_chunk (av, p);
+
+  return chunk2mem (p);
+}
+
 static void *
 sysmalloc (INTERNAL_SIZE_T nb, mstate av)
 {
@@ -2451,81 +2530,11 @@  sysmalloc (INTERNAL_SIZE_T nb, mstate av)
       || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
 	  && (mp_.n_mmaps < mp_.n_mmaps_max)))
     {
-      char *mm;           /* return value from mmap call*/
-
     try_mmap:
-      /*
-         Round up size to nearest page.  For mmapped chunks, the overhead
-         is one SIZE_SZ unit larger than for normal chunks, because there
-         is no following chunk whose prev_size field could be used.
-
-         See the front_misalign handling below, for glibc there is no
-         need for further alignments unless we have have high alignment.
-       */
-      if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
-        size = ALIGN_UP (nb + SIZE_SZ, pagesize);
-      else
-        size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
+      char *mm = sysmalloc_mmap (nb, pagesize, 0, av);
+      if (mm != MAP_FAILED)
+	return mm;
       tried_mmap = true;
-
-      /* Don't try if size wraps around 0 */
-      if ((unsigned long) (size) > (unsigned long) (nb))
-        {
-          mm = (char *) (MMAP (0, size,
-			       mtag_mmap_flags | PROT_READ | PROT_WRITE, 0));
-
-          if (mm != MAP_FAILED)
-            {
-	      sysmadvise_thp (mm, size);
-
-              /*
-                 The offset to the start of the mmapped region is stored
-                 in the prev_size field of the chunk. This allows us to adjust
-                 returned start address to meet alignment requirements here
-                 and in memalign(), and still be able to compute proper
-                 address argument for later munmap in free() and realloc().
-               */
-
-              if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
-                {
-                  /* For glibc, chunk2mem increases the address by
-                     CHUNK_HDR_SZ and MALLOC_ALIGN_MASK is
-                     CHUNK_HDR_SZ-1.  Each mmap'ed area is page
-                     aligned and therefore definitely
-                     MALLOC_ALIGN_MASK-aligned.  */
-                  assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
-                  front_misalign = 0;
-                }
-              else
-                front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
-              if (front_misalign > 0)
-                {
-                  correction = MALLOC_ALIGNMENT - front_misalign;
-                  p = (mchunkptr) (mm + correction);
-		  set_prev_size (p, correction);
-                  set_head (p, (size - correction) | IS_MMAPPED);
-                }
-              else
-                {
-                  p = (mchunkptr) mm;
-		  set_prev_size (p, 0);
-                  set_head (p, size | IS_MMAPPED);
-                }
-
-              /* update statistics */
-
-              int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
-              atomic_max (&mp_.max_n_mmaps, new);
-
-              unsigned long sum;
-              sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
-              atomic_max (&mp_.max_mmapped_mem, sum);
-
-              check_chunk (av, p);
-
-              return chunk2mem (p);
-            }
-        }
     }
 
   /* There are no usable arenas and mmap also failed.  */
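
For readers less familiar with the prev_size trick the moved comment describes, the following standalone program illustrates the idea. It is not glibc code: the struct and the 16-byte correction are simplified stand-ins, showing only how storing the front offset in prev_size lets the free path recover the original mmap address and length from the chunk pointer alone.

/* Standalone illustration (not glibc code) of the prev_size/IS_MMAPPED
   bookkeeping used for mmapped chunks.  */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

#define IS_MMAPPED 0x2UL   /* same bit glibc sets in the size field */

struct chunk_hdr
{
  size_t prev_size;        /* offset back to the start of the mapping */
  size_t size;             /* chunk size with IS_MMAPPED or'ed in */
};

int
main (void)
{
  size_t map_size = 2 * 4096;
  char *mm = mmap (NULL, map_size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert (mm != MAP_FAILED);

  /* Pretend the chunk had to be shifted by 16 bytes for alignment, as in
     the front_misalign > 0 branch of sysmalloc_mmap.  */
  size_t correction = 16;
  struct chunk_hdr *p = (struct chunk_hdr *) (mm + correction);
  p->prev_size = correction;
  p->size = (map_size - correction) | IS_MMAPPED;

  /* Later, the free path only sees P, yet can reconstruct the mapping.  */
  char *block = (char *) p - p->prev_size;
  size_t total = p->prev_size + (p->size & ~IS_MMAPPED);
  assert (block == mm && total == map_size);
  printf ("munmap (%p, %zu)\n", (void *) block, total);

  return munmap (block, total);
}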