Patchwork elf: fix maplength computation when loading a library

login
register
mail settings
Submitter Riccardo Schirone
Date Dec. 2, 2019, 10:30 a.m.
Message ID <20191202102657.GC13595@fedorawork>
Download mbox | patch
Permalink /patch/36420/
State New
Headers show

Comments

Riccardo Schirone - Dec. 2, 2019, 10:30 a.m.
glibc assumes loadable segment entries (PT_LOAD) in the program header
table appear in ascending order, sorted on the p_vaddr member. While
loading a library, function _dl_map_segments() in dl-map-segments.h lets
the kernel map the first segment of the library anywhere it likes, but
it requires a large enough chunk of memory to include all the loadable
segment entries. Considering how mmap works (on x86_64 at least), the
allocation happens to be close to other libraries and to the ld-linux
loader segments.

However, if a library is created such that the first loadable segment
entry is not the one with the lowest Virtual Address or the last
loadable segment entry is not the one with the highest End Virtual
Address, it is possible to make ld-linux wrongly compute the overall
size required to load the library in the process' memory. When this
happens, a malicious library can easily overwrite other libraries that
were already loaded, even while just executing ldd.

This patch fixes the bug by computing the highest and lowest addresses
while converting PT_LOAD segments into loadcmd structures. The size of
the ELF file (maplength) is then computed using those values instead of
relying on the sorting of loadable segments.

An attacker could still try to overwrite existing libraries by using
an executable with loadable segments with fixed addresses, however this
would be much harder as ASLR should make existing libraries' position
random enough.

Fixes: #22851
---
 elf/dl-load.c         | 30 ++++++++++++++++++++++++++----
 elf/dl-load.h         |  6 ++++--
 elf/dl-map-segments.h | 16 ++++++++++------
 3 files changed, 40 insertions(+), 12 deletions(-)

Patch

diff --git a/elf/dl-load.c b/elf/dl-load.c
index 6cdd11e6b0..50966dc7b4 100644
--- a/elf/dl-load.c
+++ b/elf/dl-load.c
@@ -1032,8 +1032,10 @@  _dl_map_object_from_fd (const char *name, const char *origname, int fd,
   {
     /* Scan the program header table, collecting its load commands.  */
     struct loadcmd loadcmds[l->l_phnum];
+    struct loadcmd *first_loadcmd = NULL;
     size_t nloadcmds = 0;
     bool has_holes = false;
+    ElfW(Addr) last_alloc_end = 0, last_map_start = 0;
 
     /* The struct is initialized to zero so this is not necessary:
     l->l_ld = 0;
@@ -1083,9 +1085,28 @@  _dl_map_object_from_fd (const char *name, const char *origname, int fd,
 	  c->allocend = ph->p_vaddr + ph->p_memsz;
 	  c->mapoff = ALIGN_DOWN (ph->p_offset, GLRO(dl_pagesize));
 
+	  /* Determine the size of the segments to be loaded.  */
+	  if (nloadcmds == 1)
+	    {
+	      last_map_start = c->mapstart;
+	      last_alloc_end = c->allocend;
+	      first_loadcmd = c;
+	    }
+	  else
+	    {
+	      if (first_loadcmd->mapstart > c->mapstart)
+	        first_loadcmd = c;
+	      if (last_alloc_end < c->allocend)
+	        last_alloc_end = c->allocend;
+	      if (last_map_start < c->mapstart)
+	        last_map_start = c->mapstart;
+	    }
+
 	  /* Determine whether there is a gap between the last segment
-	     and this one.  */
-	  if (nloadcmds > 1 && c[-1].mapend != c->mapstart)
+	     and this one.  If the segments are not sorted by mapstart,
+	     assume there is a gap.  */
+	  if (nloadcmds > 1 && (c[-1].mapend != c->mapstart
+				|| c[-1].mapstart > c->mapstart))
 	    has_holes = true;
 
 	  /* Optimize a common case.  */
@@ -1177,14 +1198,15 @@  _dl_map_object_from_fd (const char *name, const char *origname, int fd,
       }
 
     /* Length of the sections to be loaded.  */
-    maplength = loadcmds[nloadcmds - 1].allocend - loadcmds[0].mapstart;
+    maplength = last_alloc_end - first_loadcmd->mapstart;
 
     /* Now process the load commands and map segments into memory.
        This is responsible for filling in:
        l_map_start, l_map_end, l_addr, l_contiguous, l_text_end, l_phdr
      */
     errstring = _dl_map_segments (l, fd, header, type, loadcmds, nloadcmds,
-				  maplength, has_holes, loader);
+				  maplength, has_holes, loader, first_loadcmd,
+				  last_map_start);
     if (__glibc_unlikely (errstring != NULL))
       goto call_lose;
   }
diff --git a/elf/dl-load.h b/elf/dl-load.h
index a1f7b35346..c363f41840 100644
--- a/elf/dl-load.h
+++ b/elf/dl-load.h
@@ -87,7 +87,7 @@  static __always_inline void
 _dl_postprocess_loadcmd (struct link_map *l, const ElfW(Ehdr) *header,
                          const struct loadcmd *c)
 {
-  if (c->prot & PROT_EXEC)
+  if ((c->prot & PROT_EXEC) && l->l_text_end < l->l_addr + c->mapend)
     l->l_text_end = l->l_addr + c->mapend;
 
   if (l->l_phdr == 0
@@ -118,7 +118,9 @@  static const char *_dl_map_segments (struct link_map *l, int fd,
                                      size_t nloadcmds,
                                      const size_t maplength,
                                      bool has_holes,
-                                     struct link_map *loader);
+                                     struct link_map *loader,
+                                     const struct loadcmd *first_loadcmd,
+                                     const ElfW(Addr) last_map_start);
 
 /* All the error message strings _dl_map_segments might return are
    listed here so that different implementations in different sysdeps
diff --git a/elf/dl-map-segments.h b/elf/dl-map-segments.h
index 46cfb922bf..4e653033e9 100644
--- a/elf/dl-map-segments.h
+++ b/elf/dl-map-segments.h
@@ -30,9 +30,12 @@  _dl_map_segments (struct link_map *l, int fd,
                   const ElfW(Ehdr) *header, int type,
                   const struct loadcmd loadcmds[], size_t nloadcmds,
                   const size_t maplength, bool has_holes,
-                  struct link_map *loader)
+                  struct link_map *loader,
+                  const struct loadcmd *first_loadcmd,
+                  const ElfW(Addr) last_map_start)
 {
-  const struct loadcmd *c = loadcmds;
+  const struct loadcmd *c = first_loadcmd;
+  size_t i = 0, first_i = first_loadcmd - loadcmds;
 
   if (__glibc_likely (type == ET_DYN))
     {
@@ -63,7 +66,7 @@  _dl_map_segments (struct link_map *l, int fd,
       l->l_map_end = l->l_map_start + maplength;
       l->l_addr = l->l_map_start - c->mapstart;
 
-      if (has_holes)
+      if (has_holes && last_map_start > c->mapend)
         {
           /* Change protection on the excess portion to disallow all access;
              the portions we do not remap later will be inaccessible as if
@@ -72,7 +75,7 @@  _dl_map_segments (struct link_map *l, int fd,
              mapping.  */
           if (__glibc_unlikely
               (__mprotect ((caddr_t) (l->l_addr + c->mapend),
-                           loadcmds[nloadcmds - 1].mapstart - c->mapend,
+                           last_map_start - c->mapend,
                            PROT_NONE) < 0))
             return DL_MAP_SEGMENTS_ERROR_MPROTECT;
         }
@@ -87,8 +90,9 @@  _dl_map_segments (struct link_map *l, int fd,
   l->l_map_end = l->l_map_start + maplength;
   l->l_contiguous = !has_holes;
 
-  while (c < &loadcmds[nloadcmds])
+  while (i < nloadcmds)
     {
+      c = &loadcmds[(first_i + i) % nloadcmds];
       if (c->mapend > c->mapstart
           /* Map the segment contents from the file.  */
           && (__mmap ((void *) (l->l_addr + c->mapstart),
@@ -146,7 +150,7 @@  _dl_map_segments (struct link_map *l, int fd,
             }
         }
 
-      ++c;
+      ++i;
     }
 
   /* Notify ELF_PREFERRED_ADDRESS that we have to load this one