Add Safe-Linking to fastbins and tcache

Message ID CAA=iMULaUiUjsx2myeMRvEmgQav915HWmqG5iz3_P9EeMdW_Yw@mail.gmail.com
State Superseded

Commit Message

Eyal Itkin Feb. 2, 2020, 9:43 a.m. UTC
  Safe-Linking is a security mechanism that protects single-linked
lists (such as the fastbin and tcache) from being tampered with by attackers.
The mechanism makes use of randomness from ASLR (mmap_base), and
when combined with chunk alignment integrity checks, it protects the
pointers from being hijacked by an attacker.

While Safe-Unlinking protects double-linked lists (such as the small
bins), there was no similar protection for single-linked lists. This
solution protects against 3 common attacks:
  * Partial pointer override: modifies the lower bytes (Little Endian)
  * Full pointer override: hijacks the pointer to an attacker's location
  * Unaligned chunks: pointing the list to an unaligned address

The design assumes an attacker doesn't know where the heap is located,
and uses the ASLR randomness to "sign" the single-linked pointers. We
mark the pointer as P and the location in which it is stored as L, and
the calculation will be:
  * PROTECT(P) := (L >> PAGE_SHIFT) XOR (P)
  * *L = PROTECT(P)

This way, the random bits from the address L (which start at the bit
in the PAGE_SHIFT position) are merged into the LSBs of the stored
protected pointer. This protection layer prevents an attacker from
modifying the pointer into a controlled value.
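
As a minimal stand-alone sketch of this masking (an illustration, not
the patch itself: it assumes 4 KiB pages, i.e. PAGE_SHIFT == 12, and
drops the type parameter that the patch's PROTECT_PTR/REVEAL_PTR
macros take):

  #include <assert.h>
  #include <stddef.h>
  #include <stdio.h>

  #define PAGE_SHIFT 12  /* assumed: 4 KiB pages; glibc defines its own */

  /* XOR the pointer with the page-granular address of its storage slot.  */
  #define PROTECT_PTR(pos, ptr) \
    ((void *)((((size_t)(pos)) >> PAGE_SHIFT) ^ ((size_t)(ptr))))
  #define REVEAL_PTR(pos, ptr) PROTECT_PTR(pos, ptr)

  int main (void)
  {
    void *next = (void *) 0x55555555a2c0;  /* hypothetical heap pointer P */
    void *slot;                            /* storage location L */

    slot = PROTECT_PTR (&slot, next);      /* *L = PROTECT(P) */
    printf ("stored: %p\n", slot);         /* masked, not the raw pointer */

    /* XORing again with L's page bits recovers P.  */
    assert (REVEAL_PTR (&slot, slot) == next);
    return 0;
  }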

An additional check that the chunks are MALLOC_ALIGNed adds an
important layer:
  * Attackers can't point to illegal (unaligned) memory addresses
  * Attackers must guess correctly the alignment bits

On standard 32-bit Linux machines, an attack will fail outright 7
out of 8 times, and on 64-bit machines it will fail 15 out of 16
times.
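
Those odds come from the alignment check alone: of the 2^k possible
low-bit patterns in a forged pointer, only the all-zero one passes.
A hedged sketch of that arithmetic (assuming the usual 8-byte
alignment on 32-bit and 16-byte alignment on 64-bit):

  #include <stdint.h>
  #include <stdio.h>

  /* Assumed alignment masks; glibc derives these from MALLOC_ALIGNMENT.  */
  #define ALIGN_MASK_32  0x7   /*  8-byte alignment: 3 low bits checked */
  #define ALIGN_MASK_64  0xf   /* 16-byte alignment: 4 low bits checked */

  int main (void)
  {
    unsigned pass32 = 0, pass64 = 0;
    uintptr_t bits;

    for (bits = 0; bits <= ALIGN_MASK_32; bits++)
      pass32 += ((bits & ALIGN_MASK_32) == 0);
    for (bits = 0; bits <= ALIGN_MASK_64; bits++)
      pass64 += ((bits & ALIGN_MASK_64) == 0);

    /* Prints 1 of 8 and 1 of 16: a blind guess fails 7/8 and 15/16
       of the time, respectively.  */
    printf ("32-bit: %u of 8 low-bit patterns pass\n", pass32);
    printf ("64-bit: %u of 16 low-bit patterns pass\n", pass64);
    return 0;
  }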

This proposed patch was benchmarked, and its effect on the overall
performance of the heap was negligible: it couldn't be distinguished
from the normal variance between runs of the vanilla version. A
similar protection was added to Chromium's version of TCMalloc
in 2013, and according to their documentation it had an overhead of
less than 2%.

For more information, please read our White Paper, which can be
found here:
https://github.com/gperftools/gperftools/files/4023520/Safe-Linking-White-Paper.txt

---
 malloc/malloc.c | 61 ++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 48 insertions(+), 13 deletions(-)

 /*
   REALLOC_ZERO_BYTES_FREES should be set if a call to
@@ -2157,12 +2166,14 @@ do_check_malloc_state (mstate av)

       while (p != 0)
         {
+          if (__glibc_unlikely(!aligned_OK(p)))
+            malloc_printerr ("do_check_malloc_state(): un-aligned
fastbin chunk detected");
           /* each chunk claims to be inuse */
           do_check_inuse_chunk (av, p);
           total += chunksize (p);
           /* chunk belongs in this bin */
           assert (fastbin_index (chunksize (p)) == i);
-          p = p->fd;
+          p = REVEAL_PTR(&p->fd, p->fd, mchunkptr);
         }
     }

@@ -2923,7 +2934,7 @@ tcache_put (mchunkptr chunk, size_t tc_idx)
      detect a double free.  */
   e->key = tcache;

-  e->next = tcache->entries[tc_idx];
+  e->next = PROTECT_PTR(&e->next, tcache->entries[tc_idx], tcache_entry *);
   tcache->entries[tc_idx] = e;
   ++(tcache->counts[tc_idx]);
 }
@@ -2934,9 +2945,11 @@ static __always_inline void *
 tcache_get (size_t tc_idx)
 {
   tcache_entry *e = tcache->entries[tc_idx];
-  tcache->entries[tc_idx] = e->next;
+  tcache->entries[tc_idx] = REVEAL_PTR(&e->next, e->next, tcache_entry *);
   --(tcache->counts[tc_idx]);
   e->key = NULL;
+  if (__glibc_unlikely(!aligned_OK(e)))
+    malloc_printerr ("malloc(): un-aligned tcache chunk detected");
   return (void *) e;
 }

@@ -2960,7 +2973,9 @@ tcache_thread_shutdown (void)
       while (tcache_tmp->entries[i])
  {
    tcache_entry *e = tcache_tmp->entries[i];
-   tcache_tmp->entries[i] = e->next;
+      if (__glibc_unlikely(!aligned_OK(e)))
+        malloc_printerr ("tcache_thread_shutdown(): un-aligned tcache
chunk detected");
+   tcache_tmp->entries[i] = REVEAL_PTR(&e->next, e->next, tcache_entry *);
    __libc_free (e);
  }
     }
@@ -3570,8 +3585,11 @@ _int_malloc (mstate av, size_t bytes)
       victim = pp; \
       if (victim == NULL) \
  break; \
+      pp = REVEAL_PTR(&victim->fd, victim->fd, mfastbinptr);             \
+      if (__glibc_unlikely(!aligned_OK(pp)))                             \
+        malloc_printerr ("malloc(): un-aligned fastbin chunk detected"); \
     } \
-  while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim)) \
+  while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \
  != victim); \

   if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
@@ -3583,8 +3601,11 @@ _int_malloc (mstate av, size_t bytes)

       if (victim != NULL)
  {
+      if (__glibc_unlikely(!aligned_OK(victim)))
+        malloc_printerr ("malloc(): un-aligned fastbin chunk detected");
+
    if (SINGLE_THREAD_P)
-     *fb = victim->fd;
+     *fb = REVEAL_PTR(&victim->fd, victim->fd, mfastbinptr);
    else
      REMOVE_FB (fb, pp, victim);
    if (__glibc_likely (victim != NULL))
@@ -3605,8 +3626,10 @@ _int_malloc (mstate av, size_t bytes)
    while (tcache->counts[tc_idx] < mp_.tcache_count
  && (tc_victim = *fb) != NULL)
      {
+       if (__glibc_unlikely(!aligned_OK(tc_victim)))
+         malloc_printerr ("malloc(): un-aligned fastbin chunk detected");
        if (SINGLE_THREAD_P)
- *fb = tc_victim->fd;
+ *fb = REVEAL_PTR(&tc_victim->fd, tc_victim->fd, mfastbinptr);
        else
  {
    REMOVE_FB (fb, pp, tc_victim);
@@ -4196,11 +4219,15 @@ _int_free (mstate av, mchunkptr p, int have_lock)
      LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
      for (tmp = tcache->entries[tc_idx];
  tmp;
- tmp = tmp->next)
+ tmp = REVEAL_PTR(&tmp->next, tmp->next, tcache_entry *))
+        {
+       if (__glibc_unlikely(!aligned_OK(tmp)))
+ malloc_printerr ("free(): un-aligned chunk detected in tcache 2");
        if (tmp == e)
  malloc_printerr ("free(): double free detected in tcache 2");
      /* If we get here, it was a coincidence.  We've wasted a
         few cycles, but don't abort.  */
+        }
    }

  if (tcache->counts[tc_idx] < mp_.tcache_count)
@@ -4264,7 +4291,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
     add (i.e., double free).  */
  if (__builtin_expect (old == p, 0))
    malloc_printerr ("double free or corruption (fasttop)");
- p->fd = old;
+ p->fd = PROTECT_PTR(&p->fd, old, mchunkptr);
  *fb = p;
       }
     else
@@ -4274,7 +4301,8 @@ _int_free (mstate av, mchunkptr p, int have_lock)
       add (i.e., double free).  */
    if (__builtin_expect (old == p, 0))
      malloc_printerr ("double free or corruption (fasttop)");
-   p->fd = old2 = old;
+   old2 = old;
+   p->fd = PROTECT_PTR(&p->fd, old, mchunkptr);
  }
       while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
       != old2);
@@ -4472,13 +4500,16 @@ static void malloc_consolidate(mstate av)
     if (p != 0) {
       do {
  {
+   if (__glibc_unlikely(!aligned_OK(p)))
+     malloc_printerr ("malloc_consolidate(): un-aligned fastbin chunk
detected");
+
    unsigned int idx = fastbin_index (chunksize (p));
    if ((&fastbin (av, idx)) != fb)
      malloc_printerr ("malloc_consolidate(): invalid chunk size");
  }

  check_inuse_chunk(av, p);
- nextp = p->fd;
+ nextp = REVEAL_PTR(&p->fd, p->fd, mchunkptr);

  /* Slightly streamlined version of consolidation code in free() */
  size = chunksize (p);
@@ -4896,8 +4927,10 @@ int_mallinfo (mstate av, struct mallinfo *m)

   for (i = 0; i < NFASTBINS; ++i)
     {
-      for (p = fastbin (av, i); p != 0; p = p->fd)
+      for (p = fastbin (av, i); p != 0; p = REVEAL_PTR(&p->fd, p->fd, mchunkptr))
         {
+          if (__glibc_unlikely(!aligned_OK(p)))
+            malloc_printerr ("int_mallinfo(): un-aligned fastbin
chunk detected");
           ++nfastblocks;
           fastavail += chunksize (p);
         }
@@ -5437,8 +5470,10 @@ __malloc_info (int options, FILE *fp)

        while (p != NULL)
  {
+          if (__glibc_unlikely(!aligned_OK(p)))
+            malloc_printerr ("__malloc_info(): un-aligned fastbin
chunk detected");
    ++nthissize;
-   p = p->fd;
+   p = REVEAL_PTR(&p->fd, p->fd, mchunkptr);
  }

        fastavail += nthissize * thissize;
  

Comments

Carlos O'Donell Feb. 3, 2020, 6:09 p.m. UTC | #1
On 2/2/20 4:43 AM, Eyal Itkin wrote:
> Safe-Linking is a security mechanism that protects single-linked
> lists (such as the fastbin and tcache) from being tampered with by attackers.
> [...]

The concept behind your patch looks really interesting, thank you for
working on this patch!

One of the things we'll need from you is a copyright assignment
before the glibc project can accept the patches.

Please have a look at our "Contribution Checklist"
https://sourceware.org/glibc/wiki/Contribution%20checklist

"FSF Copyright Assignment"
https://sourceware.org/glibc/wiki/Contribution%20checklist#FSF_copyright_Assignment

I always suggest a futures assignment so we can accept this patch
and all future patches you submit to glibc:
http://git.savannah.gnu.org/cgit/gnulib.git/plain/doc/Copyright/request-assign.future

Thank you for your contribution!
  
Eyal Itkin Feb. 3, 2020, 7:47 p.m. UTC | #2
Thanks for your fast response.
I've just sent an assignment using your supplied form.

If anything else is needed I will be more than happy to assist.

Eyal.

On Mon, Feb 3, 2020 at 8:10 PM Carlos O'Donell <codonell@redhat.com> wrote:
>
> [...]
  
Eyal Itkin March 5, 2020, 1:19 a.m. UTC | #3
It took some time, but I signed all the forms, and just received back
the mutually signed FSF form.

What is needed now in order to proceed with the patch that I've submitted?

Thanks again for your cooperation,
Eyal Itkin.


On Mon, 3 Feb 2020, 21:47 Eyal Itkin, <eyal.itkin@gmail.com> wrote:
>
> [...]
  
Carlos O'Donell March 5, 2020, 3:30 a.m. UTC | #4
On 3/4/20 8:19 PM, Eyal Itkin wrote:
> It took some time, but I signed all the forms, and just received back
> the mutually signed FSF form.

This is great news!

I have reviewed the results, and unfortunately I have a question for
the FSF (what looks like a typo in the paperwork) which I hope gets
resolved quickly. Once it's resolved we can start reviewing the patches.
 
> What is needed now in order to proceed with the patch that I've submitted?

Once I get confirmation from the FSF about the issue then I can review the
patch starting with the high-level architecture.

> Thanks again for your cooperation,

Thank you for working through the contribution process!
  
Carlos O'Donell March 18, 2020, 9:28 p.m. UTC | #5
On Wed, Mar 4, 2020 at 10:30 PM Carlos O'Donell <carlos@redhat.com> wrote:
>
> On 3/4/20 8:19 PM, Eyal Itkin wrote:
> > It took some time, but I signed all the forms, and just received back
> > the mutually signed FSF form.
>
> This is great news!
>
> I have reviewed the results, and unfortunately I have a question for
> the FSF (what looks like a typo in the paperwork) which I hope gets
> resolved quickly. Once it's resolved we can start reviewing the patches.

OK, the typo has been fixed, sorry for this.

> > What is needed now in order to proceed with the patch that I've submitted?
>
> Once I get confirmation from the FSF about the issue then I can review the
> patch starting with the high-level architecture.

Now we can start the review!

Cheers,
Carlos.
  
Eyal Itkin March 19, 2020, 6:41 a.m. UTC | #6
Happy to hear that all the legal issues have been resolved.

What are the next steps? Have you read the white paper?
(https://github.com/gperftools/gperftools/files/4023520/Safe-Linking-White-Paper.txt)
It should contain a detailed explanation for the reasons behind this
feature, and also explain how it works.

The patch itself simply applies the single-linked-list pointer masking
(and alignment checks) to both the Fast Bins and TCache lists.
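
To make that concrete, here is a hedged sketch of what the masking
looks like on a generic singly-linked push/pop (illustrative names
only, not the actual glibc internals):

  #include <stddef.h>

  #define PAGE_SHIFT 12  /* assumed 4 KiB pages */
  #define PROTECT_PTR(pos, ptr) \
    ((struct entry *)((((size_t)(pos)) >> PAGE_SHIFT) ^ ((size_t)(ptr))))
  #define REVEAL_PTR(pos, ptr) PROTECT_PTR(pos, ptr)

  struct entry { struct entry *next; };
  static struct entry *head;

  static void
  push (struct entry *e)
  {
    e->next = PROTECT_PTR (&e->next, head);   /* store the masked link */
    head = e;
  }

  static struct entry *
  pop (void)
  {
    struct entry *e = head;
    if (e != NULL)
      head = REVEAL_PTR (&e->next, e->next);  /* unmask on removal */
    return e;
  }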

Will be happy to assist in any way I can,
Eyal.

On Wed, Mar 18, 2020 at 11:28 PM Carlos O'Donell <carlos@redhat.com> wrote:
>
> [...]
  

Patch

diff --git a/malloc/malloc.c b/malloc/malloc.c
index 7d7d30bb13..25a745df9a 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -327,6 +327,15 @@  __malloc_assert (const char *assertion, const char *file, unsigned int line,
 # define MAX_TCACHE_COUNT UINT16_MAX
 #endif

+/*
+  Safe-Linking:
+  Use randomness from ASLR (mmap_base) to protect single-linked lists
+  of Fast-Bins and TCache. Together with allocation alignment checks, this
+  mechanism reduces the risk of pointer hijacking, as was done with
+  Safe-Unlinking in the double-linked lists of Small-Bins.
+*/
+#define PROTECT_PTR(pos, ptr, type)     ((type)((((size_t)pos) >> PAGE_SHIFT) ^ ((size_t)ptr)))
+#define REVEAL_PTR(pos, ptr, type)      PROTECT_PTR(pos, ptr, type)