Small tcache improvements

Message ID VI1PR0801MB212781C516A49C72A16597FE830C0@VI1PR0801MB2127.eurprd08.prod.outlook.com
State Superseded

Commit Message

Wilco Dijkstra May 10, 2019, 6:03 p.m. UTC
Change the tcache->counts[] entries to uint16_t - this removes
the limit imposed by the char type and allows a larger tcache.  Since
65535 seems too large, limit MAX_TCACHE_COUNT to 1024.
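
As an illustrative sketch (a reduced model, not the glibc code; names
and bin count are simplified), the counter type and the count-based
fast-path check amount to this:

#include <stddef.h>
#include <stdint.h>

/* Reduced model of the per-thread cache.  With counts[] kept in
   lockstep with entries[] by the put/get helpers, "counts[idx] > 0"
   is equivalent to "entries[idx] != NULL", so the malloc fast path
   can test the counter and the per-call asserts become redundant.  */
#define NBINS 64

struct entry { struct entry *next; };

struct cache
{
  uint16_t counts[NBINS];   /* was char; uint16_t comfortably holds 1024 */
  struct entry *entries[NBINS];
};

static void
cache_put (struct cache *c, size_t idx, struct entry *e)
{
  e->next = c->entries[idx];
  c->entries[idx] = e;
  ++c->counts[idx];
}

static struct entry *
cache_get (struct cache *c, size_t idx)
{
  struct entry *e = c->entries[idx];
  c->entries[idx] = e->next;
  --c->counts[idx];
  return e;
}

/* Fast-path availability check, as in the patch: test the counter.  */
static int
cache_has (const struct cache *c, size_t idx)
{
  return c->counts[idx] > 0;
}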

bench-malloc-thread with 4 threads is ~15% faster on Cortex-A72.

OK for commit?

ChangeLog:
2019-05-10  Wilco Dijkstra  <wdijkstr@arm.com>

	* malloc/malloc.c (MAX_TCACHE_COUNT): Increase to 1024.
	(tcache_put): Remove redundant assert.
	(tcache_get): Remove redundant asserts.
	(__libc_malloc): Check tcache count is not zero.
	* manual/tunables.texi (glibc.malloc.tcache_count): Update maximum.
--
  

Comments

Wilco Dijkstra May 13, 2019, 1:13 p.m. UTC | #1
Hi DJ,

> +/* Maximum chunks in tcache bins for tunables.  This value must fit the range
> +   of tcache->counts[] entries, else they may overflow.  */
> +# define MAX_TCACHE_COUNT 1024

> Why an arbitrary limit?

There are diminishing returns, so really large values make less sense.  But I
can set it to the limit of the type if you prefer.
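
For reference, a tiny standalone check of the numbers being discussed
(plain ISO C, nothing glibc-specific): the old portable ceiling, the
uint16_t type limit, and the proposed cap.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  /* counts[] used to be char; char may be signed, so only 0..SCHAR_MAX
     was portable, matching the old documented limit of 127.  */
  printf ("old portable ceiling (SCHAR_MAX): %d\n", SCHAR_MAX);

  /* The new counts[] type is uint16_t, so the type itself allows 65535.  */
  printf ("uint16_t limit (UINT16_MAX):      %u\n", (unsigned) UINT16_MAX);

  /* Proposed tunable cap from the patch.  */
  printf ("proposed MAX_TCACHE_COUNT:        %d\n", 1024);
  return 0;
}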

Wilco
  

Patch

diff --git a/malloc/malloc.c b/malloc/malloc.c
index b8baaa2706d8d274b04b86e27fc72716753530b0..597406854ddbab8706e487a9ff1e1994a2bb2e83 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -321,6 +321,10 @@  __malloc_assert (const char *assertion, const char *file, unsigned int line,
 /* This is another arbitrary limit, which tunables can change.  Each
    tcache bin will hold at most this number of chunks.  */
 # define TCACHE_FILL_COUNT 7
+
+/* Maximum chunks in tcache bins for tunables.  This value must fit the range
+   of tcache->counts[] entries, else they may overflow.  */
+# define MAX_TCACHE_COUNT 1024
 #endif
 
 
@@ -2901,12 +2905,10 @@  typedef struct tcache_entry
    time), this is for performance reasons.  */
 typedef struct tcache_perthread_struct
 {
-  char counts[TCACHE_MAX_BINS];
+  uint16_t counts[TCACHE_MAX_BINS];
   tcache_entry *entries[TCACHE_MAX_BINS];
 } tcache_perthread_struct;
 
-#define MAX_TCACHE_COUNT 127	/* Maximum value of counts[] entries.  */
-
 static __thread bool tcache_shutting_down = false;
 static __thread tcache_perthread_struct *tcache = NULL;
 
@@ -2916,7 +2918,6 @@  static __always_inline void
 tcache_put (mchunkptr chunk, size_t tc_idx)
 {
   tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
-  assert (tc_idx < TCACHE_MAX_BINS);
 
   /* Mark this chunk as "in the tcache" so the test in _int_free will
      detect a double free.  */
@@ -2933,8 +2934,6 @@  static __always_inline void *
 tcache_get (size_t tc_idx)
 {
   tcache_entry *e = tcache->entries[tc_idx];
-  assert (tc_idx < TCACHE_MAX_BINS);
-  assert (tcache->counts[tc_idx] > 0);
   tcache->entries[tc_idx] = e->next;
   --(tcache->counts[tc_idx]);
   e->key = NULL;
@@ -3046,9 +3045,8 @@  __libc_malloc (size_t bytes)
 
   DIAG_PUSH_NEEDS_COMMENT;
   if (tc_idx < mp_.tcache_bins
-      /*&& tc_idx < TCACHE_MAX_BINS*/ /* to appease gcc */
       && tcache
-      && tcache->entries[tc_idx] != NULL)
+      && tcache->counts[tc_idx] > 0)
     {
       return tcache_get (tc_idx);
     }
diff --git a/manual/tunables.texi b/manual/tunables.texi
index ae638823a21b9cc7aca3684c8e3067cb8cd287e0..cc244512d6893f2abddb9f203ab0ff14838b00e2 100644
--- a/manual/tunables.texi
+++ b/manual/tunables.texi
@@ -189,7 +189,7 @@  per-thread cache.  The default (and maximum) value is 1032 bytes on
 
 @deftp Tunable glibc.malloc.tcache_count
 The maximum number of chunks of each size to cache. The default is 7.
-The upper limit is 127.  If set to zero, the per-thread cache is effectively
+The upper limit is 1024.  If set to zero, the per-thread cache is effectively
 disabled.
 
 The approximate maximum overhead of the per-thread cache is thus equal
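
A possible way to try the larger cap after this patch (a sketch only,
assuming the usual GLIBC_TUNABLES environment-variable mechanism
described in this manual; the 64-byte request size and loop counts are
arbitrary): build the program below and run it as
GLIBC_TUNABLES=glibc.malloc.tcache_count=1024 ./a.out so that repeated
same-size allocations refill one tcache bin up to the raised limit.

#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  enum { N = 1024 };        /* matches the proposed MAX_TCACHE_COUNT */
  void *p[N];

  for (int iter = 0; iter < 100000; iter++)
    {
      /* Allocate N blocks from a single size class...  */
      for (int i = 0; i < N; i++)
        if ((p[i] = malloc (64)) == NULL)
          return 1;
      /* ...then free them all, refilling the bin up to the tunable cap.  */
      for (int i = 0; i < N; i++)
        free (p[i]);
    }
  puts ("done");
  return 0;
}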