[v2] malloc: send freed small chunks to smallbin

Message ID 5a2_xklfnO_qoJgnmLAlYontjxrkxJif4jr1hVxo4MetQOhj-P3KF-1_NbHLMsBGCWL2AMGcZaueC_TGIEWZYhsbNlISdWwcPNeuqUv-oCw=@proton.me (mailing list archive)
State New
Series [v2] malloc: send freed small chunks to smallbin

Checks

Context Check Description
redhat-pt-bot/TryBot-apply_patch success Patch applied to master at the time it was sent
redhat-pt-bot/TryBot-32bit success Build for i686
linaro-tcwg-bot/tcwg_glibc_build--master-arm success Build passed
linaro-tcwg-bot/tcwg_glibc_build--master-aarch64 success Build passed
linaro-tcwg-bot/tcwg_glibc_check--master-arm success Test passed
linaro-tcwg-bot/tcwg_glibc_check--master-aarch64 success Test passed
redhat-pt-bot/TryBot-still_applies warning Patch no longer applies to master

Commit Message

k4lizen July 8, 2024, 5:51 a.m. UTC
  Large chunks get added to the unsorted bin since
sorting them takes time; for small chunks the
benefit of adding them to the unsorted bin is
non-existent and actually hurts performance.

Splitting and malloc_consolidate still add small
chunks to the unsorted bin, but we can hint the
compiler that this is a relatively rare occurrence.
Benchmarking shows the change to be consistently
beneficial.
---
 malloc/malloc.c | 59 +++++++++++++++++++++++++++++++++----------------
 1 file changed, 40 insertions(+), 19 deletions(-)
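
The workload this change targets is easiest to see with a small stand-alone
program rather than the glibc benchtests harness: many frees of small-bin-sized
chunks (too large for the fastbins, beyond what tcache keeps per size class),
followed by further allocations. The sketch below is only illustrative and is
not the benchmark the commit message refers to; the request size, counts and
timing loop are arbitrary assumptions.

/* Illustrative sketch: repeatedly free and reallocate small-bin-sized
   chunks.  Chunks are freed in an interleaved pattern so neighbours stay
   in use and the freed chunks cannot consolidate into large chunks.  The
   first few frees per size class land in tcache; the rest go through
   _int_free and, with this patch, straight into their smallbin instead of
   the unsorted bin.  */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define NCHUNKS   4096
#define SMALL_REQ 256     /* small-bin sized, above the default fastbin limit */
#define ROUNDS    20000

int main (void)
{
  static void *ptrs[NCHUNKS];
  struct timespec t0, t1;

  for (int i = 0; i < NCHUNKS; i++)
    ptrs[i] = malloc (SMALL_REQ);

  clock_gettime (CLOCK_MONOTONIC, &t0);
  for (int r = 0; r < ROUNDS; r++)
    {
      for (int i = 0; i < NCHUNKS; i += 2)
        free (ptrs[i]);
      for (int i = 0; i < NCHUNKS; i += 2)
        ptrs[i] = malloc (SMALL_REQ);
    }
  clock_gettime (CLOCK_MONOTONIC, &t1);

  double secs = (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) / 1e9;
  printf ("%d rounds of %d free/malloc pairs: %.3f s\n",
          ROUNDS, NCHUNKS / 2, secs);

  for (int i = 0; i < NCHUNKS; i++)
    free (ptrs[i]);
  return 0;
}

Before the patch every such freed chunk is placed in the unsorted bin and has
to be sifted back out on a later allocation; with the patch it is linked
directly into the smallbin for its size.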
  

Patch

diff --git a/malloc/malloc.c b/malloc/malloc.c
index bcb6e5b83c..ad77cd083e 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -4156,9 +4156,9 @@  _int_malloc (mstate av, size_t bytes)
 #endif
             }
 
-          /* place chunk in bin */
-
-          if (in_smallbin_range (size))
+          /* Place chunk in bin. Only malloc_consolidate() and splitting can put
+      small chunks into the unsorted bin. */
+          if (__glibc_unlikely (in_smallbin_range (size)))
             {
               victim_index = smallbin_index (size);
               bck = bin_at (av, victim_index);
@@ -4723,23 +4723,45 @@  _int_free_create_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size,
       } else
 	clear_inuse_bit_at_offset(nextchunk, 0);
 
-      /*
-	Place the chunk in unsorted chunk list. Chunks are
-	not placed into regular bins until after they have
-	been given one chance to be used in malloc.
-      */
+      mchunkptr bck, fwd;
 
-      mchunkptr bck = unsorted_chunks (av);
-      mchunkptr fwd = bck->fd;
-      if (__glibc_unlikely (fwd->bk != bck))
-	malloc_printerr ("free(): corrupted unsorted chunks");
-      p->fd = fwd;
+      if(!in_smallbin_range (size))
+        {
+          /*
+      Place large chunks in unsorted chunk list. Large chunks are
+      not placed into regular bins until after they have
+      been given one chance to be used in malloc.
+
+      This branch is first in the if-statement to help branch
+      prediction on consecutive adjacent frees.
+          */
+
+          bck = unsorted_chunks (av);
+          fwd = bck->fd;
+          if (__glibc_unlikely (fwd->bk != bck))
+      malloc_printerr ("free(): corrupted unsorted chunks");
+          p->fd_nextsize = NULL;
+          p->bk_nextsize = NULL;
+        }
+      else
+        {
+          /*
+      Place small chunks directly in their smallbin, so they
+      don't pollute the unsorted bin.
+          */
+
+          int chunk_index = smallbin_index (size);
+          bck = bin_at (av, chunk_index);
+          fwd = bck->fd;
+
+          if (__glibc_unlikely (fwd->bk != bck))
+      malloc_printerr ("free(): chunks in smallbin corrupted");
+
+          mark_bin (av, chunk_index);
+        }
+      
       p->bk = bck;
-      if (!in_smallbin_range(size))
-	{
-	  p->fd_nextsize = NULL;
-	  p->bk_nextsize = NULL;
-	}
+      p->fd = fwd;
       bck->fd = p;
       fwd->bk = p;
 
@@ -4748,7 +4770,6 @@  _int_free_create_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size,
 
       check_free_chunk(av, p);
     }
-
   else
     {
       /* If the chunk borders the current high end of memory,
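
To make the free-path hunk above easier to follow, here is a toy model of the
list manipulation it performs. The struct and helper names are invented for
illustration; glibc's real malloc_chunk layout, bin array and mark_bin
bookkeeping are not reproduced, only the head insertion into a circular
doubly linked list guarded by the fwd->bk == bck check.

#include <stdio.h>
#include <stdlib.h>

struct toy_chunk
{
  size_t size;
  struct toy_chunk *fd;   /* next chunk in the bin */
  struct toy_chunk *bk;   /* previous chunk in the bin */
};

/* Link P in at the head of BIN, mimicking the patched free path.  */
static void
toy_bin_insert (struct toy_chunk *bin, struct toy_chunk *p)
{
  struct toy_chunk *bck = bin;
  struct toy_chunk *fwd = bck->fd;

  /* Same sanity check that produces "free(): chunks in smallbin corrupted".  */
  if (fwd->bk != bck)
    {
      fprintf (stderr, "toy bin corrupted\n");
      abort ();
    }

  p->bk = bck;
  p->fd = fwd;
  bck->fd = p;
  fwd->bk = p;
}

int main (void)
{
  /* An empty bin is a sentinel whose fd and bk point back to itself.  */
  struct toy_chunk bin = { 0, &bin, &bin };
  struct toy_chunk a = { 32 }, b = { 32 };

  toy_bin_insert (&bin, &a);
  toy_bin_insert (&bin, &b);   /* b is now bin.fd, the most recently freed */

  for (struct toy_chunk *c = bin.fd; c != &bin; c = c->fd)
    printf ("chunk of size %zu\n", c->size);
  return 0;
}

The only behavioural difference the patch introduces is which list a freed
chunk is linked into: large chunks still go to the unsorted bin, while small
chunks now go straight to the smallbin indexed by their size.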