[COMMITTED] Use ALIGN_UP, ALIGN_DOWN, PTR_ALIGN_DOWN, pagesize, and powerof2 in more places.

Message ID 54E3DBD4.9080806@redhat.com
State Committed

Commit Message

Carlos O'Donell Feb. 18, 2015, 12:24 a.m. UTC
  Following up on this patch which had an ACK from Siddhesh:
https://sourceware.org/ml/libc-alpha/2014-12/msg00968.html

I additionally audited elf/dl-reloc.c and changed it to use
ALIGN_UP, ALIGN_DOWN, and PTR_ALIGN_DOWN as required. This makes
the code much easier to read at a glance.
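
For reference, the ALIGN_* helpers live in glibc's
include/libc-internal.h, and powerof2 comes from <sys/param.h>.
Roughly (a sketch from memory, not the verbatim headers):

/* Round BASE down or up to the nearest multiple of SIZE, which
   must be a power of two.  The PTR_* variants round-trip through
   uintptr_t so the same rounding applies to pointers.  */
#define ALIGN_DOWN(base, size)	((base) & -((__typeof__ (base)) (size)))
#define ALIGN_UP(base, size)	ALIGN_DOWN ((base) + (size) - 1, (size))
#define PTR_ALIGN_DOWN(base, size) \
  ((__typeof__ (base)) ALIGN_DOWN ((uintptr_t) (base), (size)))
#define PTR_ALIGN_UP(base, size) \
  ((__typeof__ (base)) ALIGN_UP ((uintptr_t) (base), (size)))

/* Nonzero if X is zero or an exact power of two.  */
#define powerof2(x)	((((x) - 1) & (x)) == 0)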

No regressions on x86_64.

Committed.

Cheers,
Carlos.

2015-02-17  Carlos O'Donell  <carlos@redhat.com>

	* elf/dl-reloc.c: Include libc-internal.h.
	(_dl_try_allocate_static_tls): Call ALIGN_UP.
	(_dl_relocate_object): Call ALIGN_UP, ALIGN_DOWN, and PTR_ALIGN_DOWN.
	(_dl_protect_relro): Call ALIGN_UP and ALIGN_DOWN.
	* malloc/arena.c (new_heap): Use pagesize. Call ALIGN_UP.
	(grow_heap): Likewise.
	* malloc/malloc.c: Include libc-internal.h.
	(do_check_malloc_state): Call powerof2.
	(sysmalloc): Use pagesize. Call ALIGN_UP.
	(systrim): Use pagesize.
	(mremap_chunk): Use pagesize. Call ALIGN_UP.
	(__libc_valloc): Use pagesize.
	(__libc_pvalloc): Use pagesize. Call ALIGN_UP.
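
The rewrite in each entry above is mechanical: every
(x + page_mask) & ~page_mask round-up becomes ALIGN_UP (x, pagesize),
and every x & ~page_mask round-down becomes ALIGN_DOWN (x, pagesize).
A minimal standalone check of that equivalence (a hypothetical test
program, not part of the patch):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Same definitions as sketched above.  */
#define ALIGN_DOWN(base, size)	((base) & -((__typeof__ (base)) (size)))
#define ALIGN_UP(base, size)	ALIGN_DOWN ((base) + (size) - 1, (size))

int
main (void)
{
  const size_t pagesize = 4096;
  const size_t page_mask = pagesize - 1;

  for (size_t x = 0; x < 3 * pagesize; x += 7)
    {
      /* Old round-up idiom vs. ALIGN_UP.  */
      assert (((x + page_mask) & ~page_mask) == ALIGN_UP (x, pagesize));
      /* Old round-down idiom vs. ALIGN_DOWN.  */
      assert ((x & ~page_mask) == ALIGN_DOWN (x, pagesize));
    }

  puts ("mask idioms and ALIGN_UP/ALIGN_DOWN agree");
  return 0;
}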

---
  

Comments

Mike Frysinger Feb. 19, 2015, 2:39 a.m. UTC | #1
On 17 Feb 2015 19:24, Carlos O'Donell wrote:
> Following up on this patch which had an ACK from Siddhesh:
> https://sourceware.org/ml/libc-alpha/2014-12/msg00968.html
> 
> I additionally audited elf/dl-reloc.c and changed it to use
> ALIGN_UP, ALIGN_DOWN, and PTR_ALIGN_DOWN as required. This makes
> the code much easier to read at a glance.

nicely done ! :)
-mike
  
Carlos O'Donell Feb. 19, 2015, 2:54 a.m. UTC | #2
On 02/18/2015 09:39 PM, Mike Frysinger wrote:
> On 17 Feb 2015 19:24, Carlos O'Donell wrote:
>> Following up on this patch which had an ACK from Siddhesh:
>> https://sourceware.org/ml/libc-alpha/2014-12/msg00968.html
>>
>> I additionally audited elf/dl-reloc.c and changed it to use
>> ALIGN_UP, ALIGN_DOWN, and PTR_ALIGN_DOWN as required. This makes
>> the code much easier to read at a glance.
> 
> nicely done ! :)

Thanks!

c.
  

Patch

diff --git a/elf/dl-reloc.c b/elf/dl-reloc.c
index 9c2705d..b72287d 100644
--- a/elf/dl-reloc.c
+++ b/elf/dl-reloc.c
@@ -26,6 +26,7 @@ 
 #include <sys/types.h>
 #include <_itoa.h>
 #include "dynamic-link.h"
+#include <libc-internal.h>
 
 /* Statistics function.  */
 #ifdef SHARED
@@ -74,9 +75,9 @@  _dl_try_allocate_static_tls (struct link_map *map)
   map->l_tls_offset = GL(dl_tls_static_used) = offset;
 #elif TLS_DTV_AT_TP
   /* dl_tls_static_used includes the TCB at the beginning.  */
-  size_t offset = (((GL(dl_tls_static_used)
-		     - map->l_tls_firstbyte_offset
-		     + map->l_tls_align - 1) & -map->l_tls_align)
+  size_t offset = (ALIGN_UP(GL(dl_tls_static_used)
+			    - map->l_tls_firstbyte_offset,
+			    map->l_tls_align)
 		   + map->l_tls_firstbyte_offset);
   size_t used = offset + map->l_tls_blocksize;
 
@@ -201,11 +202,10 @@  _dl_relocate_object (struct link_map *l, struct r_scope_elem *scope[],
 	    struct textrels *newp;
 
 	    newp = (struct textrels *) alloca (sizeof (*newp));
-	    newp->len = (((ph->p_vaddr + ph->p_memsz + GLRO(dl_pagesize) - 1)
-			  & ~(GLRO(dl_pagesize) - 1))
-			 - (ph->p_vaddr & ~(GLRO(dl_pagesize) - 1)));
-	    newp->start = ((ph->p_vaddr & ~(GLRO(dl_pagesize) - 1))
-			   + (caddr_t) l->l_addr);
+	    newp->len = ALIGN_UP (ph->p_vaddr + ph->p_memsz, GLRO(dl_pagesize))
+			- ALIGN_DOWN (ph->p_vaddr, GLRO(dl_pagesize));
+	    newp->start = PTR_ALIGN_DOWN (ph->p_vaddr, GLRO(dl_pagesize))
+			  + (caddr_t) l->l_addr;
 
 	    if (__mprotect (newp->start, newp->len, PROT_READ|PROT_WRITE) < 0)
 	      {
@@ -324,11 +324,13 @@  _dl_relocate_object (struct link_map *l, struct r_scope_elem *scope[],
 void internal_function
 _dl_protect_relro (struct link_map *l)
 {
-  ElfW(Addr) start = ((l->l_addr + l->l_relro_addr)
-		      & ~(GLRO(dl_pagesize) - 1));
-  ElfW(Addr) end = ((l->l_addr + l->l_relro_addr + l->l_relro_size)
-		    & ~(GLRO(dl_pagesize) - 1));
-
+  ElfW(Addr) start = ALIGN_DOWN((l->l_addr
+				 + l->l_relro_addr),
+				GLRO(dl_pagesize));
+  ElfW(Addr) end = ALIGN_DOWN((l->l_addr
+			       + l->l_relro_addr
+			       + l->l_relro_size),
+			      GLRO(dl_pagesize));
   if (start != end
       && __mprotect ((void *) start, end - start, PROT_READ) < 0)
     {
diff --git a/malloc/arena.c b/malloc/arena.c
index 886defb..8af51f0 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -510,7 +510,7 @@  static heap_info *
 internal_function
 new_heap (size_t size, size_t top_pad)
 {
-  size_t page_mask = GLRO (dl_pagesize) - 1;
+  size_t pagesize = GLRO (dl_pagesize);
   char *p1, *p2;
   unsigned long ul;
   heap_info *h;
@@ -523,7 +523,7 @@  new_heap (size_t size, size_t top_pad)
     return 0;
   else
     size = HEAP_MAX_SIZE;
-  size = (size + page_mask) & ~page_mask;
+  size = ALIGN_UP (size, pagesize);
 
   /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
      No swap space needs to be reserved for the following large
@@ -588,10 +588,10 @@  new_heap (size_t size, size_t top_pad)
 static int
 grow_heap (heap_info *h, long diff)
 {
-  size_t page_mask = GLRO (dl_pagesize) - 1;
+  size_t pagesize = GLRO (dl_pagesize);
   long new_size;
 
-  diff = (diff + page_mask) & ~page_mask;
+  diff = ALIGN_UP (diff, pagesize);
   new_size = (long) h->size + diff;
   if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
     return -1;
diff --git a/malloc/malloc.c b/malloc/malloc.c
index aa7edbf..ad9487e 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -241,6 +241,9 @@ 
 /* For MIN, MAX, powerof2.  */
 #include <sys/param.h>
 
+/* For ALIGN_UP.  */
+#include <libc-internal.h>
+
 
 /*
   Debugging:
@@ -2111,7 +2114,7 @@  do_check_malloc_state (mstate av)
     return;
 
   /* pagesize is a power of 2 */
-  assert ((GLRO (dl_pagesize) & (GLRO (dl_pagesize) - 1)) == 0);
+  assert (powerof2(GLRO (dl_pagesize)));
 
   /* A contiguous main_arena is consistent with sbrk_base.  */
   if (av == &main_arena && contiguous (av))
@@ -2266,7 +2269,7 @@  sysmalloc (INTERNAL_SIZE_T nb, mstate av)
   unsigned long remainder_size;   /* its size */
 
 
-  size_t pagemask = GLRO (dl_pagesize) - 1;
+  size_t pagesize = GLRO (dl_pagesize);
   bool tried_mmap = false;
 
 
@@ -2292,9 +2295,9 @@  sysmalloc (INTERNAL_SIZE_T nb, mstate av)
          need for further alignments unless we have have high alignment.
        */
       if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
-        size = (nb + SIZE_SZ + pagemask) & ~pagemask;
+        size = ALIGN_UP (nb + SIZE_SZ, pagesize);
       else
-        size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
+        size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
       tried_mmap = true;
 
       /* Don't try if size wraps around 0 */
@@ -2367,7 +2370,7 @@  sysmalloc (INTERNAL_SIZE_T nb, mstate av)
   assert ((old_top == initial_top (av) && old_size == 0) ||
           ((unsigned long) (old_size) >= MINSIZE &&
            prev_inuse (old_top) &&
-           ((unsigned long) old_end & pagemask) == 0));
+           ((unsigned long) old_end & (pagesize - 1)) == 0));
 
   /* Precondition: not enough current space to satisfy nb request */
   assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
@@ -2447,7 +2450,7 @@  sysmalloc (INTERNAL_SIZE_T nb, mstate av)
          previous calls. Otherwise, we correct to page-align below.
        */
 
-      size = (size + pagemask) & ~pagemask;
+      size = ALIGN_UP (size, pagesize);
 
       /*
          Don't try to call MORECORE if argument is so big as to appear
@@ -2481,7 +2484,7 @@  sysmalloc (INTERNAL_SIZE_T nb, mstate av)
 
           /* Cannot merge with old top, so add its size back in */
           if (contiguous (av))
-            size = (size + old_size + pagemask) & ~pagemask;
+            size = ALIGN_UP (size + old_size, pagesize);
 
           /* If we are relying on mmap as backup, then use larger units */
           if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
@@ -2587,7 +2590,7 @@  sysmalloc (INTERNAL_SIZE_T nb, mstate av)
 
                   /* Extend the end address to hit a page boundary */
                   end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
-                  correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
+                  correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
 
                   assert (correction >= 0);
                   snd_brk = (char *) (MORECORE (correction));
@@ -2738,10 +2741,10 @@  systrim (size_t pad, mstate av)
   long released;         /* Amount actually released */
   char *current_brk;     /* address returned by pre-check sbrk call */
   char *new_brk;         /* address returned by post-check sbrk call */
-  size_t pagesz;
+  size_t pagesize;
   long top_area;
 
-  pagesz = GLRO (dl_pagesize);
+  pagesize = GLRO (dl_pagesize);
   top_size = chunksize (av->top);
 
   top_area = top_size - MINSIZE - 1;
@@ -2749,7 +2752,7 @@  systrim (size_t pad, mstate av)
     return 0;
 
   /* Release in pagesize units, keeping at least one page */
-  extra = (top_area - pad) & ~(pagesz - 1);
+  extra = (top_area - pad) & ~(pagesize - 1);
 
   if (extra == 0)
     return 0;
@@ -2834,7 +2837,7 @@  static mchunkptr
 internal_function
 mremap_chunk (mchunkptr p, size_t new_size)
 {
-  size_t page_mask = GLRO (dl_pagesize) - 1;
+  size_t pagesize = GLRO (dl_pagesize);
   INTERNAL_SIZE_T offset = p->prev_size;
   INTERNAL_SIZE_T size = chunksize (p);
   char *cp;
@@ -2843,7 +2846,7 @@  mremap_chunk (mchunkptr p, size_t new_size)
   assert (((size + offset) & (GLRO (dl_pagesize) - 1)) == 0);
 
   /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
-  new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
+  new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize);
 
   /* No need to remap if the number of pages does not change.  */
   if (size + offset == new_size)
@@ -3122,8 +3125,8 @@  __libc_valloc (size_t bytes)
     ptmalloc_init ();
 
   void *address = RETURN_ADDRESS (0);
-  size_t pagesz = GLRO (dl_pagesize);
-  return _mid_memalign (pagesz, bytes, address);
+  size_t pagesize = GLRO (dl_pagesize);
+  return _mid_memalign (pagesize, bytes, address);
 }
 
 void *
@@ -3133,18 +3136,17 @@  __libc_pvalloc (size_t bytes)
     ptmalloc_init ();
 
   void *address = RETURN_ADDRESS (0);
-  size_t pagesz = GLRO (dl_pagesize);
-  size_t page_mask = GLRO (dl_pagesize) - 1;
-  size_t rounded_bytes = (bytes + page_mask) & ~(page_mask);
+  size_t pagesize = GLRO (dl_pagesize);
+  size_t rounded_bytes = ALIGN_UP (bytes, pagesize);
 
   /* Check for overflow.  */
-  if (bytes > SIZE_MAX - 2 * pagesz - MINSIZE)
+  if (bytes > SIZE_MAX - 2 * pagesize - MINSIZE)
     {
       __set_errno (ENOMEM);
       return 0;
     }
 
-  return _mid_memalign (pagesz, rounded_bytes, address);
+  return _mid_memalign (pagesize, rounded_bytes, address);
 }
 
 void *