malloc: Introduce additional accessors for struct malloc_chunk

Message ID 874lckkiqo.fsf@oldenburg.str.redhat.com

Commit Message

Florian Weimer Nov. 13, 2018, 5:08 p.m. UTC
  DJ requested more accessor macros for struct malloc_chunk, so I updated
my existing patch.

The only substantial difference comes from the conversion of this line
in _int_malloc:

  fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;

But I think the change is okay.  The rest of the patch does not result
in material changes to the generated assembler code on x86-64 with
-DNDEBUG.
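
For reference, the conversion decomposes that statement into explicit
accessor calls, roughly as sketched here (condensed from the
_int_malloc hunk below; the two earlier stores that set up victim's
own nextsize links are omitted):

  /* Before: one chained assignment storing victim into two places.  */
  fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;

  /* After (condensed): two separate stores via the new accessors.
     The result is the same as long as fwd->fd != victim, which holds
     here because victim is only now being inserted into the bin.  */
  set_malloc_chunk_bk_nextsize (malloc_chunk_fd (fwd), victim);
  set_malloc_chunk_fd_nextsize (malloc_chunk_bk_nextsize (victim), victim);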

Thanks,
Florian

2018-11-13  Florian Weimer  <fweimer@redhat.com>

	malloc: Introduce additional accessors for struct malloc_chunk.
	* malloc/malloc.c (struct malloc_chunk): Rename fd to mchunk_fd,
	bk to mchunk_bk, fd_nextsize to mchunk_fd_nextsize, bk_nextsize to
	mchunk_bk_nextsize.  Update comments.
	(MIN_CHUNK_SIZE): Adjust.
	(malloc_chunk_fd, set_malloc_chunk_fd, malloc_chunk_bk)
	(set_malloc_chunk_bk, malloc_chunk_fd_nextsize)
	(set_malloc_chunk_fd_nextsize, malloc_chunk_bk_nextsize)
	(set_malloc_chunk_bk_nextsize, MCHUNK_FD_OFFSET): New macros.
	(mchunk_fd, mchunk_bk, mchunk_fd_nextsize, mchunk_bk_nextsize):
	Poison identifiers.
	(bin_at): Use MCHUNK_FD_OFFSET.
	(first): Use malloc_chunk_fd.
	(last): Use malloc_chunk_bk.
	(unlink_chunk, malloc_init_state, do_check_free_chunk)
	(do_check_malloc_state, _int_malloc, _int_free)
	(malloc_consolidate, mtrim, int_mallinfo, __malloc_info): Use new
	accessors.
  

Patch

diff --git a/malloc/malloc.c b/malloc/malloc.c
index 6d7a6a8cab..8d3d2ff1bb 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1046,12 +1046,16 @@  struct malloc_chunk {
   INTERNAL_SIZE_T      mchunk_prev_size;  /* Size of previous chunk (if free).  */
   INTERNAL_SIZE_T      mchunk_size;       /* Size in bytes, including overhead. */
 
-  struct malloc_chunk* fd;         /* double links -- used only if free. */
-  struct malloc_chunk* bk;
+  /* Double links.  Used if free.  mchunk_fd is also used for the
+     fastbin list (whose chunks are allocated from the point of view
+     of the lower-level allocator).  */
+  struct malloc_chunk* mchunk_fd;
+  struct malloc_chunk* mchunk_bk;
 
-  /* Only used for large blocks: pointer to next larger size.  */
-  struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
-  struct malloc_chunk* bk_nextsize;
+  /* Only used for large free blocks (those not in_smallbin_range):
+     pointer to next larger size.  */
+  struct malloc_chunk* mchunk_fd_nextsize;
+  struct malloc_chunk* mchunk_bk_nextsize;
 };
 
 
@@ -1172,7 +1176,7 @@  nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
 
 /* The smallest possible chunk */
-#define MIN_CHUNK_SIZE        (offsetof(struct malloc_chunk, fd_nextsize))
+#define MIN_CHUNK_SIZE        (offsetof(struct malloc_chunk, mchunk_fd_nextsize))
 
 /* The smallest size we can malloc is an aligned minimal chunk */
 
@@ -1315,6 +1319,20 @@  nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 /* Set size at footer (only when chunk is not in use) */
 #define set_foot(p, s)       (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
 
+/* Abstraction over member access.  */
+#define malloc_chunk_fd(chunk) ((chunk)->mchunk_fd)
+#define set_malloc_chunk_fd(chunk, value) ((chunk)->mchunk_fd = (value))
+#define malloc_chunk_bk(chunk) ((chunk)->mchunk_bk)
+#define set_malloc_chunk_bk(chunk, value) ((chunk)->mchunk_bk = (value))
+#define malloc_chunk_fd_nextsize(chunk) ((chunk)->mchunk_fd_nextsize)
+#define set_malloc_chunk_fd_nextsize(chunk, value) \
+  ((chunk)->mchunk_fd_nextsize = (value))
+#define malloc_chunk_bk_nextsize(chunk) ((chunk)->mchunk_bk_nextsize)
+#define set_malloc_chunk_bk_nextsize(chunk, value) \
+  ((chunk)->mchunk_bk_nextsize = (value))
+
+/* For use in bin_at below.  */
+#define MCHUNK_FD_OFFSET (offsetof (struct malloc_chunk, mchunk_fd))
 
 /* Add an item to the atomic fastbin list at *ROOT.  Returns the old
    value at *ROOT.  Note that properties of the old chunk are only
@@ -1331,7 +1349,7 @@  fastbin_push_entry (struct malloc_chunk **root, struct malloc_chunk *e)
       head = *root;
       if (head == e)
 	malloc_printerr ("double free or corruption (fasttop)");
-      e->fd = head;
+      e->mchunk_fd = head;
       *root = e;
     }
   else
@@ -1347,9 +1365,9 @@  fastbin_push_entry (struct malloc_chunk **root, struct malloc_chunk *e)
 	   going to add (i.e., double free).  */
 	if (head == e)
 	  malloc_printerr ("double free or corruption (fasttop)");
-	e->fd = head;
+	e->mchunk_fd = head;
       }
-  /* Synchronizes with the acquire MO CAS in  */
+  /* Synchronizes with the acquire MO CAS in fastbin_pop_entry.  */
     while (!atomic_compare_exchange_weak_release (root, &head, e));
   return head;
 }
@@ -1364,7 +1382,7 @@  fastbin_pop_entry (struct malloc_chunk **root)
     {
       head = *root;
       if (head != NULL)
-	*root = head->fd;
+	*root = head->mchunk_fd;
     }
   else
     {
@@ -1377,7 +1395,7 @@  fastbin_pop_entry (struct malloc_chunk **root)
 	{
 	  if (head == NULL)
 	    return NULL;
-	  tail = head->fd;
+	  tail = head->mchunk_fd;
 	}
       /* Synchronizes with the release MO store in fastbin_push_entry.
 	 We do not have an ABA issue here because the caller has
@@ -1388,8 +1406,14 @@  fastbin_pop_entry (struct malloc_chunk **root)
   return head;
 }
 
+
+/* Disallow direct access to the struct fields.  */
 #pragma GCC poison mchunk_size
 #pragma GCC poison mchunk_prev_size
+#pragma GCC poison mchunk_fd
+#pragma GCC poison mchunk_bk
+#pragma GCC poison mchunk_fd_nextsize
+#pragma GCC poison mchunk_bk_nextsize
 
 /*
    -------------------- Internal data structures --------------------
@@ -1446,15 +1470,14 @@  typedef struct malloc_chunk *mbinptr;
 
 /* addressing -- note that bin_at(0) does not exist */
 #define bin_at(m, i) \
-  (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2]))			      \
-             - offsetof (struct malloc_chunk, fd))
+  (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) - MCHUNK_FD_OFFSET)
 
 /* analog of ++bin */
 #define next_bin(b)  ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
 
 /* Reminders about list directionality within bins */
-#define first(b)     ((b)->fd)
-#define last(b)      ((b)->bk)
+#define first(b)     malloc_chunk_fd (b)
+#define last(b)      malloc_chunk_bk (b)
 
 /*
    Indexing
@@ -1535,36 +1558,42 @@  unlink_chunk (mstate av, mchunkptr p)
   if (chunksize (p) != prev_size (next_chunk (p)))
     malloc_printerr ("corrupted size vs. prev_size");
 
-  mchunkptr fd = p->fd;
-  mchunkptr bk = p->bk;
+  struct malloc_chunk *fd = malloc_chunk_fd (p);
+  struct malloc_chunk *bk = malloc_chunk_bk (p);
 
-  if (__builtin_expect (fd->bk != p || bk->fd != p, 0))
+  if (__builtin_expect (malloc_chunk_bk (fd) != p || malloc_chunk_fd (bk) != p, 0))
     malloc_printerr ("corrupted double-linked list");
 
-  fd->bk = bk;
-  bk->fd = fd;
-  if (!in_smallbin_range (chunksize_nomask (p)) && p->fd_nextsize != NULL)
+  set_malloc_chunk_bk (fd, bk);
+  set_malloc_chunk_fd (bk, fd);
+  if (!in_smallbin_range (chunksize_nomask (p))
+      && malloc_chunk_fd_nextsize (p) != NULL)
     {
-      if (p->fd_nextsize->bk_nextsize != p
-	  || p->bk_nextsize->fd_nextsize != p)
+      struct malloc_chunk *p_fd_nextsize = malloc_chunk_fd_nextsize (p);
+      struct malloc_chunk *p_bk_nextsize = malloc_chunk_bk_nextsize (p);
+      if (malloc_chunk_bk_nextsize (p_fd_nextsize) != p
+	  || malloc_chunk_fd_nextsize (p_bk_nextsize) != p)
 	malloc_printerr ("corrupted double-linked list (not small)");
 
-      if (fd->fd_nextsize == NULL)
+      if (malloc_chunk_fd_nextsize (fd) == NULL)
 	{
-	  if (p->fd_nextsize == p)
-	    fd->fd_nextsize = fd->bk_nextsize = fd;
+	  if (p_fd_nextsize == p)
+	    {
+	      set_malloc_chunk_fd_nextsize (fd, fd);
+	      set_malloc_chunk_bk_nextsize (fd, fd);
+	    }
 	  else
 	    {
-	      fd->fd_nextsize = p->fd_nextsize;
-	      fd->bk_nextsize = p->bk_nextsize;
-	      p->fd_nextsize->bk_nextsize = fd;
-	      p->bk_nextsize->fd_nextsize = fd;
+	      set_malloc_chunk_fd_nextsize (fd, p_fd_nextsize);
+	      set_malloc_chunk_bk_nextsize (fd, p_bk_nextsize);
+	      set_malloc_chunk_bk_nextsize (p_fd_nextsize, fd);
+	      set_malloc_chunk_fd_nextsize (p_bk_nextsize, fd);
 	    }
 	}
       else
 	{
-	  p->fd_nextsize->bk_nextsize = p->bk_nextsize;
-	  p->bk_nextsize->fd_nextsize = p->fd_nextsize;
+	  set_malloc_chunk_bk_nextsize (p_fd_nextsize, p_bk_nextsize);
+	  set_malloc_chunk_fd_nextsize (p_bk_nextsize, p_fd_nextsize);
 	}
     }
 }
@@ -1877,7 +1906,8 @@  malloc_init_state (mstate av)
   for (i = 1; i < NBINS; ++i)
     {
       bin = bin_at (av, i);
-      bin->fd = bin->bk = bin;
+      set_malloc_chunk_fd (bin, bin);
+      set_malloc_chunk_bk (bin, bin);
     }
 
 #if MORECORE_CONTIGUOUS
@@ -2066,8 +2096,8 @@  do_check_free_chunk (mstate av, mchunkptr p)
       assert (next == av->top || inuse (next));
 
       /* ... and has minimally sane links */
-      assert (p->fd->bk == p);
-      assert (p->bk->fd == p);
+      assert (malloc_chunk_bk (malloc_chunk_fd (p)) == p);
+      assert (malloc_chunk_fd (malloc_chunk_bk (p)) == p);
     }
   else /* markers are always of size SIZE_SZ */
     assert (sz == SIZE_SZ);
@@ -2244,7 +2274,7 @@  do_check_malloc_state (mstate av)
           total += chunksize (p);
           /* chunk belongs in this bin */
           assert (fastbin_index (chunksize (p)) == i);
-          p = p->fd;
+          p = malloc_chunk_fd (p);
         }
     }
 
@@ -2264,7 +2294,7 @@  do_check_malloc_state (mstate av)
             assert (binbit);
         }
 
-      for (p = last (b); p != b; p = p->bk)
+      for (p = last (b); p != b; p = malloc_chunk_bk (p))
         {
           /* each chunk claims to be free */
           do_check_free_chunk (av, p);
@@ -2276,34 +2306,42 @@  do_check_malloc_state (mstate av)
               idx = bin_index (size);
               assert (idx == i);
               /* lists are sorted */
-              assert (p->bk == b ||
-                      (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p));
+              assert (malloc_chunk_bk (p) == b
+		      || ((unsigned long) chunksize (malloc_chunk_bk (p))
+			  >= (unsigned long) chunksize (p)));
 
               if (!in_smallbin_range (size))
                 {
-                  if (p->fd_nextsize != NULL)
+                  if (malloc_chunk_fd_nextsize (p) != NULL)
                     {
-                      if (p->fd_nextsize == p)
-                        assert (p->bk_nextsize == p);
+                      if (malloc_chunk_fd_nextsize (p) == p)
+                        assert (malloc_chunk_bk_nextsize (p) == p);
                       else
                         {
-                          if (p->fd_nextsize == first (b))
-                            assert (chunksize (p) < chunksize (p->fd_nextsize));
+                          if (malloc_chunk_fd_nextsize (p) == first (b))
+                            assert (chunksize (p)
+				    < chunksize (malloc_chunk_fd_nextsize (p)));
                           else
-                            assert (chunksize (p) > chunksize (p->fd_nextsize));
+                            assert (chunksize (p)
+				    > chunksize (malloc_chunk_fd_nextsize (p)));
 
                           if (p == first (b))
-                            assert (chunksize (p) > chunksize (p->bk_nextsize));
+                            assert (chunksize (p)
+				    > chunksize (malloc_chunk_bk_nextsize (p)));
                           else
-                            assert (chunksize (p) < chunksize (p->bk_nextsize));
+                            assert (chunksize (p)
+				    < chunksize (malloc_chunk_bk_nextsize (p)));
                         }
                     }
                   else
-                    assert (p->bk_nextsize == NULL);
+                    assert (malloc_chunk_bk_nextsize (p) == NULL);
                 }
             }
           else if (!in_smallbin_range (size))
-            assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
+	    {
+	      assert (malloc_chunk_fd_nextsize (p) == NULL);
+	      assert (malloc_chunk_bk_nextsize (p) == NULL);
+	    }
           /* chunk is followed by a legal chain of inuse chunks */
           for (q = next_chunk (p);
                (q != av->top && inuse (q) &&
@@ -3679,12 +3717,12 @@  _int_malloc (mstate av, size_t bytes)
 
       if ((victim = last (bin)) != bin)
         {
-          bck = victim->bk;
-	  if (__glibc_unlikely (bck->fd != victim))
+          bck = malloc_chunk_bk (victim);
+	  if (__glibc_unlikely (malloc_chunk_fd (bck) != victim))
 	    malloc_printerr ("malloc(): smallbin double linked list corrupted");
           set_inuse_bit_at_offset (victim, nb);
-          bin->bk = bck;
-          bck->fd = bin;
+	  set_malloc_chunk_bk (bin, bck);
+	  set_malloc_chunk_fd (bck, bin);
 
           if (av != &main_arena)
 	    set_non_main_arena (victim);
@@ -3703,12 +3741,12 @@  _int_malloc (mstate av, size_t bytes)
 		{
 		  if (tc_victim != 0)
 		    {
-		      bck = tc_victim->bk;
+		      bck = malloc_chunk_bk (tc_victim);
 		      set_inuse_bit_at_offset (tc_victim, nb);
 		      if (av != &main_arena)
 			set_non_main_arena (tc_victim);
-		      bin->bk = bck;
-		      bck->fd = bin;
+		      set_malloc_chunk_bk (bin, bck);
+		      set_malloc_chunk_fd (bck, bin);
 
 		      tcache_put (tc_victim, tc_idx);
 	            }
@@ -3765,9 +3803,10 @@  _int_malloc (mstate av, size_t bytes)
   for (;; )
     {
       int iters = 0;
-      while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
+      while ((victim = malloc_chunk_bk (unsorted_chunks (av)))
+	     != unsorted_chunks (av))
         {
-          bck = victim->bk;
+          bck = malloc_chunk_bk (victim);
           size = chunksize (victim);
           mchunkptr next = chunk_at_offset (victim, size);
 
@@ -3779,8 +3818,9 @@  _int_malloc (mstate av, size_t bytes)
             malloc_printerr ("malloc(): invalid next size (unsorted)");
           if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))
             malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)");
-          if (__glibc_unlikely (bck->fd != victim)
-              || __glibc_unlikely (victim->fd != unsorted_chunks (av)))
+          if (__glibc_unlikely (malloc_chunk_fd (bck) != victim)
+              || __glibc_unlikely (malloc_chunk_fd (victim)
+				   != unsorted_chunks (av)))
             malloc_printerr ("malloc(): unsorted double linked list corrupted");
           if (__glibc_unlikely (prev_inuse (next)))
             malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)");
@@ -3801,13 +3841,15 @@  _int_malloc (mstate av, size_t bytes)
               /* split and reattach remainder */
               remainder_size = size - nb;
               remainder = chunk_at_offset (victim, nb);
-              unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
+	      set_malloc_chunk_fd (unsorted_chunks (av), remainder);
+              set_malloc_chunk_bk (unsorted_chunks (av), remainder);
               av->last_remainder = remainder;
-              remainder->bk = remainder->fd = unsorted_chunks (av);
+	      set_malloc_chunk_fd (remainder, unsorted_chunks (av));
+	      set_malloc_chunk_bk (remainder, unsorted_chunks (av));
               if (!in_smallbin_range (remainder_size))
                 {
-                  remainder->fd_nextsize = NULL;
-                  remainder->bk_nextsize = NULL;
+                  set_malloc_chunk_fd_nextsize (remainder, NULL);
+                  set_malloc_chunk_bk_nextsize (remainder, NULL);
                 }
 
               set_head (victim, nb | PREV_INUSE |
@@ -3822,10 +3864,10 @@  _int_malloc (mstate av, size_t bytes)
             }
 
           /* remove from unsorted list */
-          if (__glibc_unlikely (bck->fd != victim))
+          if (__glibc_unlikely (malloc_chunk_fd (bck) != victim))
             malloc_printerr ("malloc(): corrupted unsorted chunks 3");
-          unsorted_chunks (av)->bk = bck;
-          bck->fd = unsorted_chunks (av);
+          set_malloc_chunk_bk (unsorted_chunks (av), bck);
+          set_malloc_chunk_fd (bck, unsorted_chunks (av));
 
           /* Take now instead of binning if exact fit */
 
@@ -3862,13 +3904,13 @@  _int_malloc (mstate av, size_t bytes)
             {
               victim_index = smallbin_index (size);
               bck = bin_at (av, victim_index);
-              fwd = bck->fd;
+              fwd = malloc_chunk_fd (bck);
             }
           else
             {
               victim_index = largebin_index (size);
               bck = bin_at (av, victim_index);
-              fwd = bck->fd;
+              fwd = malloc_chunk_fd (bck);
 
               /* maintain large bins in sorted order */
               if (fwd != bck)
@@ -3876,49 +3918,61 @@  _int_malloc (mstate av, size_t bytes)
                   /* Or with inuse bit to speed comparisons */
                   size |= PREV_INUSE;
                   /* if smaller than smallest, bypass loop below */
-                  assert (chunk_main_arena (bck->bk));
+                  assert (chunk_main_arena (malloc_chunk_bk (bck)));
                   if ((unsigned long) (size)
-		      < (unsigned long) chunksize_nomask (bck->bk))
+		      < ((unsigned long) chunksize_nomask
+			 (malloc_chunk_bk (bck))))
                     {
                       fwd = bck;
-                      bck = bck->bk;
+                      bck = malloc_chunk_bk (bck);
 
-                      victim->fd_nextsize = fwd->fd;
-                      victim->bk_nextsize = fwd->fd->bk_nextsize;
-                      fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
+                      set_malloc_chunk_fd_nextsize
+                        (victim, malloc_chunk_fd (fwd));
+                      set_malloc_chunk_bk_nextsize
+                        (victim,
+                         malloc_chunk_bk_nextsize (malloc_chunk_fd (fwd)));
+                      set_malloc_chunk_bk_nextsize
+			(malloc_chunk_fd (fwd), victim);
+                      set_malloc_chunk_fd_nextsize
+                        (malloc_chunk_bk_nextsize (victim), victim);
                     }
                   else
                     {
                       assert (chunk_main_arena (fwd));
                       while ((unsigned long) size < chunksize_nomask (fwd))
                         {
-                          fwd = fwd->fd_nextsize;
+                          fwd = malloc_chunk_fd_nextsize (fwd);
 			  assert (chunk_main_arena (fwd));
                         }
 
                       if ((unsigned long) size
 			  == (unsigned long) chunksize_nomask (fwd))
                         /* Always insert in the second position.  */
-                        fwd = fwd->fd;
+                        fwd = malloc_chunk_fd (fwd);
                       else
                         {
-                          victim->fd_nextsize = fwd;
-                          victim->bk_nextsize = fwd->bk_nextsize;
-                          fwd->bk_nextsize = victim;
-                          victim->bk_nextsize->fd_nextsize = victim;
+			  set_malloc_chunk_fd_nextsize (victim, fwd);
+			  set_malloc_chunk_bk_nextsize
+			    (victim, malloc_chunk_bk_nextsize (fwd));
+			  set_malloc_chunk_bk_nextsize (fwd, victim);
+			  set_malloc_chunk_fd_nextsize
+			    (malloc_chunk_bk_nextsize (victim), victim);
                         }
-                      bck = fwd->bk;
+                      bck = malloc_chunk_bk (fwd);
                     }
                 }
               else
-                victim->fd_nextsize = victim->bk_nextsize = victim;
-            }
+		{
+		  set_malloc_chunk_fd_nextsize (victim, victim);
+		  set_malloc_chunk_bk_nextsize (victim, victim);
+		}
+	    }
 
           mark_bin (av, victim_index);
-          victim->bk = bck;
-          victim->fd = fwd;
-          fwd->bk = victim;
-          bck->fd = victim;
+	  set_malloc_chunk_bk (victim, bck);
+	  set_malloc_chunk_fd (victim, fwd);
+	  set_malloc_chunk_bk (fwd, victim);
+	  set_malloc_chunk_fd (bck, victim);
 
 #if USE_TCACHE
       /* If we've processed as many chunks as we're allowed while
@@ -3959,17 +4013,17 @@  _int_malloc (mstate av, size_t bytes)
 	      && (unsigned long) chunksize_nomask (victim)
 	        >= (unsigned long) (nb))
             {
-              victim = victim->bk_nextsize;
+              victim = malloc_chunk_bk_nextsize (victim);
               while (((unsigned long) (size = chunksize (victim)) <
                       (unsigned long) (nb)))
-                victim = victim->bk_nextsize;
+                victim = malloc_chunk_bk_nextsize (victim);
 
               /* Avoid removing the first entry for a size so that the skip
                  list does not have to be rerouted.  */
               if (victim != last (bin)
 		  && chunksize_nomask (victim)
-		    == chunksize_nomask (victim->fd))
-                victim = victim->fd;
+		    == chunksize_nomask (malloc_chunk_fd (victim)))
+                victim = malloc_chunk_fd (victim);
 
               remainder_size = size - nb;
               unlink_chunk (av, victim);
@@ -3988,17 +4042,17 @@  _int_malloc (mstate av, size_t bytes)
                   /* We cannot assume the unsorted list is empty and therefore
                      have to perform a complete insert here.  */
                   bck = unsorted_chunks (av);
-                  fwd = bck->fd;
-		  if (__glibc_unlikely (fwd->bk != bck))
+                  fwd = malloc_chunk_fd (bck);
+                  if (__glibc_unlikely (malloc_chunk_bk (fwd) != bck))
 		    malloc_printerr ("malloc(): corrupted unsorted chunks");
-                  remainder->bk = bck;
-                  remainder->fd = fwd;
-                  bck->fd = remainder;
-                  fwd->bk = remainder;
+		  set_malloc_chunk_bk (remainder, bck);
+		  set_malloc_chunk_fd (remainder, fwd);
+		  set_malloc_chunk_fd (bck, remainder);
+		  set_malloc_chunk_bk (fwd, remainder);
                   if (!in_smallbin_range (remainder_size))
                     {
-                      remainder->fd_nextsize = NULL;
-                      remainder->bk_nextsize = NULL;
+		      set_malloc_chunk_fd_nextsize (remainder, NULL);
+		      set_malloc_chunk_bk_nextsize (remainder, NULL);
                     }
                   set_head (victim, nb | PREV_INUSE |
                             (av != &main_arena ? NON_MAIN_ARENA : 0));
@@ -4092,21 +4146,21 @@  _int_malloc (mstate av, size_t bytes)
                   /* We cannot assume the unsorted list is empty and therefore
                      have to perform a complete insert here.  */
                   bck = unsorted_chunks (av);
-                  fwd = bck->fd;
-		  if (__glibc_unlikely (fwd->bk != bck))
+                  fwd = malloc_chunk_fd (bck);
+		  if (__glibc_unlikely (malloc_chunk_bk (fwd) != bck))
 		    malloc_printerr ("malloc(): corrupted unsorted chunks 2");
-                  remainder->bk = bck;
-                  remainder->fd = fwd;
-                  bck->fd = remainder;
-                  fwd->bk = remainder;
+		  set_malloc_chunk_bk (remainder, bck);
+		  set_malloc_chunk_fd (remainder, fwd);
+		  set_malloc_chunk_fd (bck, remainder);
+		  set_malloc_chunk_bk (fwd, remainder);
 
                   /* advertise as last remainder */
                   if (in_smallbin_range (nb))
                     av->last_remainder = remainder;
                   if (!in_smallbin_range (remainder_size))
                     {
-                      remainder->fd_nextsize = NULL;
-                      remainder->bk_nextsize = NULL;
+		      set_malloc_chunk_fd_nextsize (remainder, NULL);
+		      set_malloc_chunk_bk_nextsize (remainder, NULL);
                     }
                   set_head (victim, nb | PREV_INUSE |
                             (av != &main_arena ? NON_MAIN_ARENA : 0));
@@ -4346,18 +4400,18 @@  _int_free (mstate av, mchunkptr p, int have_lock)
       */
 
       bck = unsorted_chunks(av);
-      fwd = bck->fd;
-      if (__glibc_unlikely (fwd->bk != bck))
+      fwd = malloc_chunk_fd (bck);
+      if (__glibc_unlikely (malloc_chunk_bk (fwd) != bck))
 	malloc_printerr ("free(): corrupted unsorted chunks");
-      p->fd = fwd;
-      p->bk = bck;
+      set_malloc_chunk_fd (p, fwd);
+      set_malloc_chunk_bk (p, bck);
       if (!in_smallbin_range(size))
 	{
-	  p->fd_nextsize = NULL;
-	  p->bk_nextsize = NULL;
+	  set_malloc_chunk_fd_nextsize (p, NULL);
+	  set_malloc_chunk_bk_nextsize (p, NULL);
 	}
-      bck->fd = p;
-      fwd->bk = p;
+      set_malloc_chunk_fd (bck, p);
+      set_malloc_chunk_bk (fwd, p);
 
       set_head(p, size | PREV_INUSE);
       set_foot(p, size);
@@ -4475,7 +4529,7 @@  static void malloc_consolidate(mstate av)
 	}
 
 	check_inuse_chunk(av, p);
-	nextp = p->fd;
+	nextp = malloc_chunk_fd (p);
 
 	/* Slightly streamlined version of consolidation code in free() */
 	size = chunksize (p);
@@ -4500,18 +4554,19 @@  static void malloc_consolidate(mstate av)
 	  } else
 	    clear_inuse_bit_at_offset(nextchunk, 0);
 
-	  first_unsorted = unsorted_bin->fd;
-	  unsorted_bin->fd = p;
-	  first_unsorted->bk = p;
+	  first_unsorted = malloc_chunk_fd (unsorted_bin);
+	  set_malloc_chunk_fd (unsorted_bin, p);
+	  set_malloc_chunk_bk (first_unsorted, p);
 
-	  if (!in_smallbin_range (size)) {
-	    p->fd_nextsize = NULL;
-	    p->bk_nextsize = NULL;
-	  }
+	  if (!in_smallbin_range (size))
+	    {
+	      set_malloc_chunk_fd_nextsize (p, NULL);
+	      set_malloc_chunk_bk_nextsize (p, NULL);
+	    }
 
 	  set_head(p, size | PREV_INUSE);
-	  p->bk = unsorted_bin;
-	  p->fd = first_unsorted;
+	  set_malloc_chunk_bk (p, unsorted_bin);
+	  set_malloc_chunk_fd (p, first_unsorted);
 	  set_foot(p, size);
 	}
 
@@ -4808,7 +4863,7 @@  mtrim (mstate av, size_t pad)
       {
         mbinptr bin = bin_at (av, i);
 
-        for (mchunkptr p = last (bin); p != bin; p = p->bk)
+        for (mchunkptr p = last (bin); p != bin; p = malloc_chunk_bk (p))
           {
             INTERNAL_SIZE_T size = chunksize (p);
 
@@ -4938,7 +4993,7 @@  int_mallinfo (mstate av, struct mallinfo *m)
 
   for (i = 0; i < NFASTBINS; ++i)
     {
-      for (p = fastbin (av, i); p != 0; p = p->fd)
+      for (p = fastbin (av, i); p != 0; p = malloc_chunk_fd (p))
         {
           ++nfastblocks;
           fastavail += chunksize (p);
@@ -4951,7 +5006,7 @@  int_mallinfo (mstate av, struct mallinfo *m)
   for (i = 1; i < NBINS; ++i)
     {
       b = bin_at (av, i);
-      for (p = last (b); p != b; p = p->bk)
+      for (p = last (b); p != b; p = malloc_chunk_bk (p))
         {
           ++nblocks;
           avail += chunksize (p);
@@ -5467,7 +5522,7 @@  __malloc_info (int options, FILE *fp)
 	      while (p != NULL)
 		{
 		  ++nthissize;
-		  p = p->fd;
+		  p = malloc_chunk_fd (p);
 		}
 
 	      fastavail += nthissize * thissize;
@@ -5489,7 +5544,7 @@  __malloc_info (int options, FILE *fp)
       for (size_t i = 1; i < NBINS; ++i)
 	{
 	  bin = bin_at (ar_ptr, i);
-	  r = bin->fd;
+	  r = malloc_chunk_fd (bin);
 	  sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
 	  sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
 					  = sizes[NFASTBINS - 1 + i].count = 0;
@@ -5505,7 +5560,7 @@  __malloc_info (int options, FILE *fp)
 		sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
 						   r_size);
 
-		r = r->fd;
+		r = malloc_chunk_fd (r);
 	      }
 
 	  if (sizes[NFASTBINS - 1 + i].count == 0)