[v4,12/14] elf: Move most of the _dl_find_object data to the protected heap

Message ID e9363b769761b8014f8691dbac07709957d16347.1738530302.git.fweimer@redhat.com (mailing list archive)
State New
Series RELRO link maps

Checks

Context Check Description
redhat-pt-bot/TryBot-apply_patch success Patch applied to master at the time it was sent
linaro-tcwg-bot/tcwg_glibc_build--master-aarch64 success Build passed
linaro-tcwg-bot/tcwg_glibc_check--master-aarch64 fail Test failed
linaro-tcwg-bot/tcwg_glibc_build--master-arm success Build passed
linaro-tcwg-bot/tcwg_glibc_check--master-arm fail Test failed

Commit Message

Florian Weimer Feb. 2, 2025, 9:13 p.m. UTC
  The heap is mostly read-only by design, so allocation padding is
no longer required.  The protected heap is not visible to malloc,
so it's not necessary to deallocate the allocations during
__libc_freeres anymore.  Also put critical pointers into
the protected memory area.
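
The general shape of such a never-freed allocation is a bump
allocator over mmap'ed memory that can be sealed read-only between
updates.  A hypothetical sketch of the pattern (not the dl-protmem
implementation from this series; pool_allocate and pool_seal are
made-up names):

#define _DEFAULT_SOURCE         /* for MAP_ANONYMOUS */
#include <stddef.h>
#include <sys/mman.h>

/* Memory comes from mmap, so malloc never sees it, and individual
   allocations are never freed.  */
static char *pool;
static size_t pool_used;
enum { pool_size = 1 << 20 };

static void *
pool_allocate (size_t size)
{
  size = (size + 15) & ~(size_t) 15;    /* keep 16-byte alignment */
  if (pool == NULL)
    {
      pool = mmap (NULL, pool_size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (pool == MAP_FAILED)
        return NULL;
    }
  if (pool_size - pool_used < size)
    return NULL;                        /* real code would grow the pool */
  void *result = pool + pool_used;
  pool_used += size;
  return result;
}

/* Between updates, the whole pool stays read-only.  */
static int
pool_seal (void)
{
  return mprotect (pool, pool_size, PROT_READ);
}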

With this change, all control data for _dl_find_object is either
RELRO data, or in the protected area, or tightly constrained
(the version counter is always masked using & 1 before array
indexing).
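
A minimal sketch of that read pattern, using plain C11 atomics and
illustrative names (segment_search, mappings, version) instead of the
internal __atomic_wide_counter and GLPM accessors:

#include <stdatomic.h>
#include <stdint.h>

struct segment;                     /* stands in for dlfo_mappings_segment */
extern struct segment *mappings[2]; /* the two copies */
extern _Atomic uint64_t version;    /* monotonic update counter */
extern void *segment_search (struct segment *, void *pc);  /* hypothetical */

void *
lookup (void *pc)
{
  for (;;)
    {
      /* Acquire load pairs with the writer's release store.  */
      uint64_t start = atomic_load_explicit (&version,
                                             memory_order_acquire);
      /* Masking with & 1 bounds the index; the lowest bit names the
         copy that is currently safe to read.  */
      void *result = segment_search (mappings[start & 1], pc);
      /* An unchanged counter means no dlopen/dlclose touched this
         copy during the read; otherwise retry.  */
      if (atomic_load_explicit (&version, memory_order_acquire)
          == start)
        return result;
    }
}

The writer prepares the inactive copy first and only then advances
the counter, so a reader (or signal handler) that races with an
update simply retries on the other copy.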
---
 elf/dl-find_object.c       | 133 ++++++++++---------------------------
 elf/dl-find_object.h       |   3 -
 elf/dl-libc_freeres.c      |   2 -
 sysdeps/generic/ldsodefs.h |   9 +++
 4 files changed, 45 insertions(+), 102 deletions(-)
  

Comments

Carlos O'Donell Feb. 3, 2025, 2:29 p.m. UTC | #1
On 2/2/25 4:13 PM, Florian Weimer wrote:
> The heap is mostly read-only by design, so allocation padding is
> no longer required.  The protected heap is not visible to malloc,
> so it's not necessary to deallocate the allocations during
> __libc_freeres anymore.  Also put critical pointers into
> the protected memory area.
> 
> With this change, all control data for _dl_find_object is either
> RELRO data, or in the protected area, or tightly constrained
> (the version counter is always masked using & 1 before array
> indexing).

It may or may not be relevant, but the Linaro CI tester had an intermittent
failure after this patch that caused a segfault in the test.

Have you seen this?

FAIL: resolv/tst-resolv-ai_idn-nolibidn2
original exit status 1
Didn't expect signal from child: got `Segmentation fault'

Florian Weimer Feb. 3, 2025, 3:58 p.m. UTC | #2
* Carlos O'Donell:

> On 2/2/25 4:13 PM, Florian Weimer wrote:
>> The heap is mostly read-only by design, so allocation padding is
>> no longer required.  The protected heap is not visible to malloc,
>> so it's not necessary to deallocate the allocations during
>> __libc_freeres anymore.  Also put critical pointers into
>> the protected memory area.
>> 
>> With this change, all control data for _dl_find_object is either
>> RELRO data, or in the protected area, or tightly constrained
>> (the version counter is always masked using & 1 before array
>> indexing).
>
> It may or may not be relevant, but the Linaro CI tester had an intermittent
> failure after this patch that caused a segfault in the test.
>
> Have you seen this?
>
> FAIL: resolv/tst-resolv-ai_idn-nolibidn2
> original exit status 1
> Didn't expect signal from child: got `Segmentation fault'

I saw it.  It did not show up in my testing on AArch64 as far as I
recall.

(The test results on AArch64 are quite noisy right now because of new
tests with race conditions.)

Thanks,
Florian
  

Patch

diff --git a/elf/dl-find_object.c b/elf/dl-find_object.c
index d8d09ffe0b..332f6765a4 100644
--- a/elf/dl-find_object.c
+++ b/elf/dl-find_object.c
@@ -20,6 +20,7 @@ 
 #include <atomic.h>
 #include <atomic_wide_counter.h>
 #include <dl-find_object.h>
+#include <dl-protmem.h>
 #include <dlfcn.h>
 #include <ldsodefs.h>
 #include <link.h>
@@ -91,8 +92,9 @@  static struct dl_find_object_internal *_dlfo_nodelete_mappings
    to avoid data races.
 
    The memory allocations are never deallocated, but slots used for
-   objects that have been dlclose'd can be reused by dlopen.  The
-   memory can live in the regular C malloc heap.
+   objects that have been dlclose'd can be reused by dlopen.
+   Allocations come from the protected memory heap.  This makes it
+   harder to inject DWARF data.
 
    The segments are populated from the start of the list, with the
    mappings with the highest address.  Only if this segment is full,
@@ -111,9 +113,6 @@  struct dlfo_mappings_segment
      initialization; read in the TM region.  */
   struct dlfo_mappings_segment *previous;
 
-  /* Used by __libc_freeres to deallocate malloc'ed memory.  */
-  void *to_free;
-
   /* Count of array elements in use and allocated.  */
   size_t size;                  /* Read in the TM region.  */
   size_t allocated;
@@ -121,13 +120,6 @@  struct dlfo_mappings_segment
   struct dl_find_object_internal objects[]; /* Read in the TM region.  */
 };
 
-/* To achieve async-signal-safety, two copies of the data structure
-   are used, so that a signal handler can still use this data even if
-   dlopen or dlclose modify the other copy.  The the least significant
-   bit in _dlfo_loaded_mappings_version determines which array element
-   is the currently active region.  */
-static struct dlfo_mappings_segment *_dlfo_loaded_mappings[2];
-
 /* Returns the number of actually used elements in all segments
    starting at SEG.  */
 static inline size_t
@@ -154,44 +146,15 @@  _dlfo_mappings_segment_count_allocated (struct dlfo_mappings_segment *seg)
 
 /* This is essentially an arbitrary value.  dlopen allocates plenty of
    memory anyway, so over-allocated a bit does not hurt.  Not having
-   many small-ish segments helps to avoid many small binary searches.
-   Not using a power of 2 means that we do not waste an extra page
-   just for the malloc header if a mapped allocation is used in the
-   glibc allocator.  */
-enum { dlfo_mappings_initial_segment_size = 63 };
-
-/* Allocate an empty segment.  This used for the first ever
-   allocation.  */
-static struct dlfo_mappings_segment *
-_dlfo_mappings_segment_allocate_unpadded (size_t size)
-{
-  if (size < dlfo_mappings_initial_segment_size)
-    size = dlfo_mappings_initial_segment_size;
-  /* No overflow checks here because the size is a mapping count, and
-     struct link_map is larger than what we allocate here.  */
-  enum
-    {
-      element_size = sizeof ((struct dlfo_mappings_segment) {}.objects[0])
-    };
-  size_t to_allocate = (sizeof (struct dlfo_mappings_segment)
-                        + size * element_size);
-  struct dlfo_mappings_segment *result = malloc (to_allocate);
-  if (result != NULL)
-    {
-      result->previous = NULL;
-      result->to_free = NULL; /* Minimal malloc memory cannot be freed.  */
-      result->size = 0;
-      result->allocated = size;
-    }
-  return result;
-}
+   many small-ish segments helps to avoid many small binary searches.  */
+enum { dlfo_mappings_initial_segment_size = 64 };
 
 /* Allocate an empty segment that is at least SIZE large.  PREVIOUS
    points to the chain of previously allocated segments and can be
    NULL.  */
 static struct dlfo_mappings_segment *
 _dlfo_mappings_segment_allocate (size_t size,
-                                 struct dlfo_mappings_segment * previous)
+                                 struct dlfo_mappings_segment *previous)
 {
   /* Exponential sizing policies, so that lookup approximates a binary
      search.  */
@@ -200,11 +163,10 @@  _dlfo_mappings_segment_allocate (size_t size,
     if (previous == NULL)
       minimum_growth = dlfo_mappings_initial_segment_size;
     else
-      minimum_growth = 2* previous->allocated;
+      minimum_growth = 2 * previous->allocated;
     if (size < minimum_growth)
       size = minimum_growth;
   }
-  enum { cache_line_size_estimate = 128 };
   /* No overflow checks here because the size is a mapping count, and
      struct link_map is larger than what we allocate here.  */
   enum
@@ -212,36 +174,28 @@  _dlfo_mappings_segment_allocate (size_t size,
       element_size = sizeof ((struct dlfo_mappings_segment) {}.objects[0])
     };
   size_t to_allocate = (sizeof (struct dlfo_mappings_segment)
-                        + size * element_size
-                        + 2 * cache_line_size_estimate);
-  char *ptr = malloc (to_allocate);
-  if (ptr == NULL)
+                        + size * element_size);
+  struct dlfo_mappings_segment *result = _dl_protmem_allocate (to_allocate);
+  if (result == NULL)
     return NULL;
-  char *original_ptr = ptr;
-  /* Start and end at a (conservative) 128-byte cache line boundary.
-     Do not use memalign for compatibility with partially interposing
-     malloc implementations.  */
-  char *end = PTR_ALIGN_DOWN (ptr + to_allocate, cache_line_size_estimate);
-  ptr = PTR_ALIGN_UP (ptr, cache_line_size_estimate);
-  struct dlfo_mappings_segment *result
-    = (struct dlfo_mappings_segment *) ptr;
   result->previous = previous;
-  result->to_free = original_ptr;
   result->size = 0;
-  /* We may have obtained slightly more space if malloc happened
-     to provide an over-aligned pointer.  */
-  result->allocated = (((uintptr_t) (end - ptr)
-                        - sizeof (struct dlfo_mappings_segment))
-                       / element_size);
-  assert (result->allocated >= size);
+  result->allocated = size;
   return result;
 }
 
 /* Monotonic counter for software transactional memory.  The lowest
-   bit indicates which element of the _dlfo_loaded_mappings contains
-   up-to-date data.  */
+   bit indicates which element of the GLPM (dlfo_loaded_mappings)
+   contains up-to-date data.  This achieves async-signal-safety for
+   _dl_find_object: a signal handler can still use the
+   GLPM (dlfo_loaded_mappings) data even if dlopen or dlclose
+   modify the other copy.  */
 static __atomic_wide_counter _dlfo_loaded_mappings_version;
 
+#ifndef SHARED
+struct dlfo_mappings_segment *_dlfo_loaded_mappings[2];
+#endif
+
 /* TM version at the start of the read operation.  */
 static inline uint64_t
 _dlfo_read_start_version (void)
@@ -309,7 +263,7 @@  _dlfo_read_success (uint64_t start_version)
 static struct dlfo_mappings_segment *
 _dlfo_mappings_active_segment (uint64_t start_version)
 {
-  return _dlfo_loaded_mappings[start_version & 1];
+  return GLPM (dlfo_loaded_mappings)[start_version & 1];
 }
 
 /* Searches PC among the address-sorted array [FIRST1, FIRST1 +
@@ -518,10 +472,10 @@  _dlfo_process_initial (void)
             }
           else if (l->l_type == lt_loaded)
             {
-              if (_dlfo_loaded_mappings[0] != NULL)
+              if (GLPM (dlfo_loaded_mappings)[0] != NULL)
                 /* Second pass only.  */
                 _dl_find_object_from_map
-                  (l, &_dlfo_loaded_mappings[0]->objects[loaded]);
+                  (l, &GLPM (dlfo_loaded_mappings)[0]->objects[loaded]);
               ++loaded;
             }
         }
@@ -577,13 +531,14 @@  _dl_find_object_init (void)
 
   /* Allocate the data structures.  */
   size_t loaded_size = _dlfo_process_initial ();
-  _dlfo_nodelete_mappings = malloc (_dlfo_nodelete_mappings_size
-                                    * sizeof (*_dlfo_nodelete_mappings));
+  _dlfo_nodelete_mappings
+    = _dl_protmem_allocate (_dlfo_nodelete_mappings_size
+                            * sizeof (*_dlfo_nodelete_mappings));
   if (loaded_size > 0)
-    _dlfo_loaded_mappings[0]
-      = _dlfo_mappings_segment_allocate_unpadded (loaded_size);
+    GLPM (dlfo_loaded_mappings)[0]
+      = _dlfo_mappings_segment_allocate (loaded_size, NULL);
   if (_dlfo_nodelete_mappings == NULL
-      || (loaded_size > 0 && _dlfo_loaded_mappings[0] == NULL))
+      || (loaded_size > 0 && GLPM (dlfo_loaded_mappings)[0] == NULL))
     _dl_fatal_printf ("\
 Fatal glibc error: cannot allocate memory for find-object data\n");
   /* Fill in the data with the second call.  */
@@ -599,8 +554,8 @@  Fatal glibc error: cannot allocate memory for find-object data\n");
       _dlfo_nodelete_mappings_end = _dlfo_nodelete_mappings[last_idx].map_end;
     }
   if (loaded_size > 0)
-    _dlfo_sort_mappings (_dlfo_loaded_mappings[0]->objects,
-                         _dlfo_loaded_mappings[0]->size);
+    _dlfo_sort_mappings (GLPM (dlfo_loaded_mappings)[0]->objects,
+                         GLPM (dlfo_loaded_mappings)[0]->size);
 }
 
 static void
@@ -654,11 +609,11 @@  _dl_find_object_update_1 (struct link_map **loaded, size_t count)
   int active_idx = _dlfo_read_version_locked () & 1;
 
   struct dlfo_mappings_segment *current_seg
-    = _dlfo_loaded_mappings[active_idx];
+    = GLPM (dlfo_loaded_mappings)[active_idx];
   size_t current_used = _dlfo_mappings_segment_count_used (current_seg);
 
   struct dlfo_mappings_segment *target_seg
-    = _dlfo_loaded_mappings[!active_idx];
+    = GLPM (dlfo_loaded_mappings)[!active_idx];
   size_t remaining_to_add = current_used + count;
 
   /* remaining_to_add can be 0 if (current_used + count) wraps, but in practice
@@ -687,7 +642,8 @@  _dl_find_object_update_1 (struct link_map **loaded, size_t count)
 
         /* The barrier ensures that a concurrent TM read or fork does
            not see a partially initialized segment.  */
-        atomic_store_release (&_dlfo_loaded_mappings[!active_idx], target_seg);
+        atomic_store_release (&GLPM (dlfo_loaded_mappings)[!active_idx],
+                              target_seg);
       }
     else
       /* Start update cycle without allocation.  */
@@ -846,20 +802,3 @@  _dl_find_object_dlclose (struct link_map *map)
         return;
       }
 }
-
-void
-_dl_find_object_freeres (void)
-{
-  for (int idx = 0; idx < 2; ++idx)
-    {
-      for (struct dlfo_mappings_segment *seg = _dlfo_loaded_mappings[idx];
-           seg != NULL; )
-        {
-          struct dlfo_mappings_segment *previous = seg->previous;
-          free (seg->to_free);
-          seg = previous;
-        }
-      /* Stop searching in shared objects.  */
-      _dlfo_loaded_mappings[idx] = NULL;
-    }
-}
diff --git a/elf/dl-find_object.h b/elf/dl-find_object.h
index e433ff8740..cc2ad9a38f 100644
--- a/elf/dl-find_object.h
+++ b/elf/dl-find_object.h
@@ -135,7 +135,4 @@  bool _dl_find_object_update (struct link_map *new_map) attribute_hidden;
    data structures.  Needs to be protected by loader write lock.  */
 void _dl_find_object_dlclose (struct link_map *l) attribute_hidden;
 
-/* Called from __libc_freeres to deallocate malloc'ed memory.  */
-void _dl_find_object_freeres (void) attribute_hidden;
-
 #endif /* _DL_FIND_OBJECT_H */
diff --git a/elf/dl-libc_freeres.c b/elf/dl-libc_freeres.c
index 093724b765..e728f3b9fa 100644
--- a/elf/dl-libc_freeres.c
+++ b/elf/dl-libc_freeres.c
@@ -127,6 +127,4 @@  __rtld_libc_freeres (void)
   void *scope_free_list = GL(dl_scope_free_list);
   GL(dl_scope_free_list) = NULL;
   free (scope_free_list);
-
-  _dl_find_object_freeres ();
 }
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index d31fa1bb59..42bee8e9ce 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -508,6 +508,8 @@  extern struct rtld_global _rtld_global __rtld_global_attribute__;
 # undef __rtld_global_attribute__
 #endif
 
+struct dlfo_mappings_segment;
+
 #ifdef SHARED
 /* Implementation structure for the protected memory area.  In static
    builds, the protected memory area is just regular (.data) memory,
@@ -517,6 +519,13 @@  struct rtld_protmem
 {
   /* Structure describing the dynamic linker itself.  */
   struct link_map _dl_rtld_map;
+#endif /* SHARED */
+
+  /* Two copies of the data structures for _dl_find_object.  See
+     _dlfo_loaded_mappings_version in dl-find_object.c.  */
+  EXTERN struct dlfo_mappings_segment *_dlfo_loaded_mappings[2];
+
+#ifdef SHARED
 };
 #endif /* SHARED */