@@ -72,6 +72,7 @@ dl-routines = \
dl-open \
dl-origin \
dl-printf \
+ dl-protmem \
dl-reloc \
dl-runtime \
dl-scope \
@@ -117,6 +118,7 @@ elide-routines.os = \
# These object files are only included in the dynamically-linked libc.
shared-only-routines = \
+ dl-protmem \
libc-dl-profile \
libc-dl-profstub \
libc-dl_find_object \
@@ -529,11 +531,13 @@ tests-internal += \
tst-audit19a \
tst-create_format1 \
tst-dl-hwcaps_split \
+ tst-dl-protmem \
tst-dl_find_object \
tst-dl_find_object-threads \
tst-dlmopen2 \
tst-hash-collision3 \
tst-ptrguard1 \
+ tst-relro-linkmap \
tst-stackguard1 \
tst-tls-surplus \
tst-tls3 \
@@ -976,6 +980,9 @@ modules-names += \
tst-recursive-tlsmod13 \
tst-recursive-tlsmod14 \
tst-recursive-tlsmod15 \
+ tst-relro-linkmap-mod1 \
+ tst-relro-linkmap-mod2 \
+ tst-relro-linkmap-mod3 \
tst-relsort1mod1 \
tst-relsort1mod2 \
tst-ro-dynamic-mod \
@@ -3393,3 +3400,8 @@ $(objpfx)tst-nolink-libc-2: $(objpfx)tst-nolink-libc.o
-Wl,--dynamic-linker=$(objpfx)ld.so
$(objpfx)tst-nolink-libc-2.out: $(objpfx)tst-nolink-libc-2 $(objpfx)ld.so
$< > $@ 2>&1; $(evaluate-test)
+
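+# The test program must export check_relro_link_maps so that its
+# definition interposes the fallback definitions in the modules.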
+LDFLAGS-tst-relro-linkmap = -Wl,-E
+$(objpfx)tst-relro-linkmap: $(objpfx)tst-relro-linkmap-mod1.so
+$(objpfx)tst-relro-linkmap.out: $(objpfx)tst-dlopenfailmod1.so \
+ $(objpfx)tst-relro-linkmap-mod2.so $(objpfx)tst-relro-linkmap-mod3.so
@@ -33,6 +33,7 @@
#include <tls.h>
#include <stap-probe.h>
#include <dl-find_object.h>
+#include <dl-protmem.h>
#include <dl-unmap-segments.h>
@@ -130,6 +131,9 @@ _dl_close_worker (struct link_map *map, bool force)
return;
}
+ /* Actual changes are about to happen. */
+ _dl_protmem_begin ();
+
Lmid_t nsid = map->l_ns;
struct link_namespaces *ns = &GL(dl_ns)[nsid];
@@ -260,7 +264,10 @@ _dl_close_worker (struct link_map *map, bool force)
/* Call its termination function. Do not do it for
half-cooked objects. Temporarily disable exception
- handling, so that errors are fatal. */
+ handling, so that errors are fatal.
+
+ Link maps are writable during this call, but avoiding
+ that is probably too costly. */
if (imap->l_rw->l_init_called)
_dl_catch_exception (NULL, _dl_call_fini, imap);
@@ -360,8 +367,11 @@ _dl_close_worker (struct link_map *map, bool force)
newp = (struct r_scope_elem **)
malloc (new_size * sizeof (struct r_scope_elem *));
if (newp == NULL)
- _dl_signal_error (ENOMEM, "dlclose", NULL,
- N_("cannot create scope list"));
+ {
+ _dl_protmem_end ();
+ _dl_signal_error (ENOMEM, "dlclose", NULL,
+ N_("cannot create scope list"));
+ }
}
/* Copy over the remaining scope elements. */
@@ -709,7 +719,7 @@ _dl_close_worker (struct link_map *map, bool force)
if (imap == GL(dl_initfirst))
GL(dl_initfirst) = NULL;
- free (imap);
+ _dl_free_object (imap);
}
}
@@ -758,6 +768,8 @@ _dl_close_worker (struct link_map *map, bool force)
}
dl_close_state = not_pending;
+
+ _dl_protmem_end ();
}
@@ -18,6 +18,7 @@
#include <ldsodefs.h>
#include <dl-find_object.h>
+#include <dl-protmem.h>
static bool
free_slotinfo (struct dtv_slotinfo_list **elemp)
@@ -52,6 +53,10 @@ __rtld_libc_freeres (void)
struct link_map *l;
struct r_search_path_elem *d;
+ /* We are about to write to link maps. This is not paired with
+ _dl_protmem_end because the process is going away anyway. */
+ _dl_protmem_begin ();
+
/* Remove all search directories. */
d = GL(dl_all_dirs);
while (d != GLRO(dl_init_all_dirs))
@@ -34,6 +34,7 @@
#include <gnu/lib-names.h>
#include <dl-tunables.h>
#include <alloc_buffer.h>
+#include <dl-protmem.h>
/* Type for the buffer we put the ELF header and hopefully the program
header. This buffer does not really have to be too large. In most
@@ -962,7 +963,8 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
free (l->l_libname);
if (l != NULL && l->l_phdr_allocated)
free ((void *) l->l_phdr);
- free (l);
+ if (l != NULL)
+ _dl_free_object (l);
free (realname);
_dl_signal_error (errval, name, NULL, errstring);
}
@@ -2214,6 +2216,22 @@ add_path (struct add_path_state *p, const struct r_search_path_struct *sps,
}
}
+/* Wrap cache_rpath to unprotect memory first if necessary. */
+static bool
+cache_rpath_unprotect (struct link_map *l,
+ struct r_search_path_struct *sp,
+ int tag,
+ const char *what,
+ bool *unprotected)
+{
+ if (sp->dirs == NULL && !*unprotected)
+ {
+ _dl_protmem_begin ();
+ *unprotected = true;
+ }
+ return cache_rpath (l, sp, tag, what);
+}
+
void
_dl_rtld_di_serinfo (struct link_map *loader, Dl_serinfo *si, bool counting)
{
@@ -2230,6 +2248,7 @@ _dl_rtld_di_serinfo (struct link_map *loader, Dl_serinfo *si, bool counting)
.si = si,
.allocptr = (char *) &si->dls_serpath[si->dls_cnt]
};
+ bool unprotected = false;
# define add_path(p, sps, flags) add_path(p, sps, 0) /* XXX */
@@ -2242,7 +2261,8 @@ _dl_rtld_di_serinfo (struct link_map *loader, Dl_serinfo *si, bool counting)
struct link_map *l = loader;
do
{
- if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
+ if (cache_rpath_unprotect (l, &l->l_rpath_dirs, DT_RPATH,
+ "RPATH", &unprotected))
add_path (&p, &l->l_rpath_dirs, XXX_RPATH);
l = l->l_loader;
}
@@ -2253,7 +2273,8 @@ _dl_rtld_di_serinfo (struct link_map *loader, Dl_serinfo *si, bool counting)
{
l = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
if (l != NULL && l->l_type != lt_loaded && l != loader)
- if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
+ if (cache_rpath_unprotect (l, &l->l_rpath_dirs, DT_RPATH,
+ "RPATH", &unprotected))
add_path (&p, &l->l_rpath_dirs, XXX_RPATH);
}
}
@@ -2262,7 +2283,8 @@ _dl_rtld_di_serinfo (struct link_map *loader, Dl_serinfo *si, bool counting)
add_path (&p, &__rtld_env_path_list, XXX_ENV);
/* Look at the RUNPATH information for this binary. */
- if (cache_rpath (loader, &loader->l_runpath_dirs, DT_RUNPATH, "RUNPATH"))
+ if (cache_rpath_unprotect (loader, &loader->l_runpath_dirs, DT_RUNPATH,
+ "RUNPATH", &unprotected))
add_path (&p, &loader->l_runpath_dirs, XXX_RUNPATH);
/* XXX
@@ -2277,4 +2299,7 @@ _dl_rtld_di_serinfo (struct link_map *loader, Dl_serinfo *si, bool counting)
/* Count the struct size before the string area, which we didn't
know before we completed dls_cnt. */
si->dls_size += (char *) &si->dls_serpath[si->dls_cnt] - (char *) si;
+
+ if (unprotected)
+ _dl_protmem_end ();
}
@@ -21,6 +21,7 @@
#include <stdlib.h>
#include <unistd.h>
#include <ldsodefs.h>
+#include <dl-protmem.h>
#include <assert.h>
@@ -89,15 +90,19 @@ _dl_new_object (char *realname, const char *libname, int type,
# define audit_space 0
#endif
- new = calloc (sizeof (*new)
- + sizeof (struct link_map_private *)
- + sizeof (*newname) + libname_len, 1);
+ size_t l_size = (sizeof (*new)
+ + sizeof (struct link_map_private *)
+ + sizeof (*newname) + libname_len);
+
+ new = _dl_protmem_allocate (l_size);
if (new == NULL)
return NULL;
+ memset (new, 0, sizeof (*new));
+ new->l_size = l_size;
new->l_rw = calloc (1, sizeof (*new->l_rw) + audit_space);
if (new->l_rw == NULL)
{
- free (new);
+ _dl_protmem_free (new, l_size);
return NULL;
}
@@ -107,7 +112,7 @@ _dl_new_object (char *realname, const char *libname, int type,
new->l_libname = newname
= (struct libname_list *) (new->l_symbolic_searchlist.r_list + 1);
newname->name = (char *) memcpy (newname + 1, libname, libname_len);
- /* newname->next = NULL; We use calloc therefore not necessary. */
+ newname->next = NULL;
newname->dont_free = 1;
/* When we create the executable link map, or a VDSO link map, we start
@@ -142,12 +147,9 @@ _dl_new_object (char *realname, const char *libname, int type,
#ifdef SHARED
for (unsigned int cnt = 0; cnt < naudit; ++cnt)
- /* No need to initialize bindflags due to calloc. */
link_map_audit_state (new, cnt)->cookie = (uintptr_t) new;
#endif
- /* new->l_global = 0; We use calloc therefore not necessary. */
-
/* Use the 'l_scope_mem' array by default for the 'l_scope'
information. If we need more entries we will allocate a large
array dynamically. */
@@ -266,3 +268,9 @@ _dl_new_object (char *realname, const char *libname, int type,
return new;
}
+
+void
+_dl_free_object (struct link_map *l)
+{
+ _dl_protmem_free (l, l->l_size);
+}
@@ -37,6 +37,7 @@
#include <libc-early-init.h>
#include <gnu/lib-names.h>
#include <dl-find_object.h>
+#include <dl-protmem.h>
#include <dl-prop.h>
@@ -172,6 +173,8 @@ add_to_global_update (struct link_map *new)
{
struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];
+ _dl_protmem_begin ();
+
/* Now add the new entries. */
unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
@@ -202,6 +205,8 @@ add_to_global_update (struct link_map *new)
atomic_write_barrier ();
ns->_ns_main_searchlist->r_nlist = new_nlist;
+
+ _dl_protmem_end ();
}
/* Search link maps in all namespaces for the DSO that contains the object at
@@ -515,6 +520,11 @@ dl_open_worker_begin (void *a)
const char *file = args->file;
int mode = args->mode;
+ /* Prepare for link map updates. If dl_open_worker below returns
+ normally, a matching _dl_protmem_end call is performed there. On
+ an exception, the handler in the caller has to perform it. */
+ _dl_protmem_begin ();
+
/* The namespace ID is now known. Keep track of whether libc.so was
already loaded, to determine whether it is necessary to call the
early initialization routine (or clear libc_map on error). */
@@ -778,6 +788,10 @@ dl_open_worker (void *a)
_dl_signal_exception (err, &ex, NULL);
}
+ /* Make state read-only before running user code in ELF
+ constructors. */
+ _dl_protmem_end ();
+
if (!args->worker_continue)
return;
@@ -927,6 +941,10 @@ no more namespaces available for dlmopen()"));
the flag here. */
}
+ /* Due to the exception, we did not end the protmem transaction
+ before. */
+ _dl_protmem_end ();
+
/* Release the lock. */
__rtld_lock_unlock_recursive (GL(dl_load_lock));
new file mode 100644
@@ -0,0 +1,66 @@
+/* Protected memory allocator for ld.so. Internal interfaces.
+ Copyright (C) 2025 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* These declarations are needed by <dl-protmem_bootstrap.h>, which
+ has to be inlined into _dl_start. */
+
+#ifndef DL_PROTMEM_INTERNAL_H
+#define DL_PROTMEM_INTERNAL_H
+
+/* Minimum chunk size. Used to preserve alignment. */
+enum { _dlpm_chunk_minimal_size = 8 };
+
+/* The initial allocation covers about 150 link maps, which should be
+ enough for most programs. */
+#if __WORDSIZE == 32
+# define DL_PROTMEM_INITIAL_REGION_SIZE 131072
+#else
+# define DL_PROTMEM_INITIAL_REGION_SIZE 262144
+#endif
+
+#define DL_PROTMEM_REGION_COUNT 12
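+
+/* Together, the regions cover DL_PROTMEM_INITIAL_REGION_SIZE
+   * (2**DL_PROTMEM_REGION_COUNT - 1) bytes of address space, i.e.
+   roughly 1 GiB on 64-bit and 512 MiB on 32-bit targets.  */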
+
+/* Struct tag denoting freelist entries. */
+struct dl_protmem_freelist_chunk;
+
+/* Global state for the protected memory allocator. */
+struct dl_protmem_state
+{
+ /* GLRO (dl_protmem) points to this field. */
+ struct rtld_protmem protmem
+ __attribute__ ((__aligned__ (_dlpm_chunk_minimal_size)));
+
+  /* Pointers to mmap-allocated regions.  For index i, the size of
+     the allocation is DL_PROTMEM_INITIAL_REGION_SIZE << i.  The
+     space of the combined regions is sufficient for hundreds of
+     thousands of link maps, so the dynamic linker runs into
+     scalability issues well before it is exhausted.  */
+ void *regions[DL_PROTMEM_REGION_COUNT];
+
+  /* List of unused allocations for each region, in increasing address
+ order. See _dlpm_chunk_size for how the freed chunk size is
+ encoded. */
+ struct dl_protmem_freelist_chunk *freelist[DL_PROTMEM_REGION_COUNT];
+
+  /* One cached free chunk, used to avoid scanning the freelist for
+ adjacent deallocations. Tracking these chunks per region avoids
+ accidental merging across regions. */
+ struct dl_protmem_freelist_chunk *pending_free[DL_PROTMEM_REGION_COUNT];
+};
+
+#endif /* DL_PROTMEM_INTERNAL_H */
new file mode 100644
@@ -0,0 +1,425 @@
+/* Protected memory allocator for ld.so.
+ Copyright (C) 2025 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <ldsodefs.h>
+
+#include <dl-protmem.h>
+#include <dl-protmem-internal.h>
+
+#include <array_length.h>
+#include <assert.h>
+#include <sys/mman.h>
+
+/* Nesting counter for _dl_protmem_begin/_dl_protmem_end.  This is
+   primarily required because we may have a nested call sequence
+   dlopen -> malloc -> dlopen.  Without the counter, _dl_protmem_end
+   in the inner dlopen would make a link map that is still being
+   initialized read-only.  */
+static unsigned int _dl_protmem_begin_count;
+
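+/* Returns the allocator state containing GLRO (dl_protmem).
+   GLRO (dl_protmem) points to the protmem member, so this is a
+   container_of-style computation.  */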
+static inline struct dl_protmem_state *
+_dl_protmem_state (void)
+{
+ return ((void *) GLRO (dl_protmem)
+ - offsetof (struct dl_protmem_state, protmem));
+}
+
+/* Address of a chunk on the free list.  This is an abstract pointer,
+   never to be dereferenced explicitly.  Use the accessor functions
+   below instead.
+
+   Metadata layout: The first word is the pointer to the next chunk,
+   except that the lowest bit (unused due to alignment) serves as a
+   flag.  If it is 1, the chunk size is the minimal size, and the
+   size is not stored separately.  If the flag is 0, the size is
+   stored in the second metadata word.  */
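+
+/* Example encoding (illustration, assuming 64-bit words): a minimal
+   8-byte free chunk at address A with successor N stores the single
+   word (uintptr_t) N | 1; a 24-byte free chunk at A stores the two
+   words { (uintptr_t) N, 24 }.  */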
+typedef struct dl_protmem_freelist_chunk *chunk;
+
+/* Returns the size of a chunk on the free list whose start address is
+ FREEPTR. The size includes the metadata. */
+static inline size_t
+_dlpm_chunk_size (chunk ptr)
+{
+  uintptr_t *p = (uintptr_t *) ptr;
+ if (*p & 1)
+ return _dlpm_chunk_minimal_size;
+ else
+ return p[1];
+}
+
+/* Returns the address of the next free list element. */
+static inline chunk
+_dlpm_chunk_next (chunk ptr)
+{
+  uintptr_t *p = (uintptr_t *) ptr;
+ /* Mask away the size bit. */
+ return (chunk) (*p & -2);
+}
+
+static inline void
+_dlpm_chunk_set_next (chunk ptr, chunk newnext)
+{
+ /* Preserve the value of the size bit. */
+  uintptr_t *p = (uintptr_t *) ptr;
+ *p = (uintptr_t) newnext | (*p & 1);
+}
+
+/* Creates a new freelist chunk at PTR, with NEXT as the next chunk,
+ and SIZE as the size of this chunk (which includes the
+ metadata). Returns PTR. */
+static inline chunk
+_dlpm_chunk_make (chunk ptr, chunk next, size_t size)
+{
+  uintptr_t *p = (uintptr_t *) ptr;
+ if (size <= _dlpm_chunk_minimal_size)
+ /* Compressed size. */
+ *p = (uintptr_t) next | 1;
+ else
+ {
+ p[0] = (uintptr_t) next;
+ p[1] = size;
+ }
+ return ptr;
+}
+
+/* Return true if PTR2 comes immediately after PTR1 in memory. PTR2
+ can be NULL. */
+static inline bool
+_dlpm_chunk_adjacent (chunk ptr1, chunk ptr2)
+{
+ return (uintptr_t) ptr2 == (uintptr_t) ptr1 + _dlpm_chunk_size (ptr1);
+}
+
+/* Put the pending allocation on the free list. */
+static void
+_dlpm_free_pending (struct dl_protmem_state *state, unsigned int region)
+{
+ chunk pending = state->pending_free[region];
+ state->pending_free[region] = NULL;
+
+ /* The current chunk pointer. In the while loop below, coalescing
+ potentially happens at the end of this chunk, so that the chunk
+ address does not change. */
+ chunk current = state->freelist[region];
+
+ /* Special cases before loop start. */
+
+ if (current == NULL)
+ {
+ /* The freelist is empty. Nothing to coalesce. */
+ state->freelist[region] = pending;
+ return;
+ }
+
+ /* During the loop below, this merge is handled as part of the next
+ chunk processing. */
+ if (pending < current)
+ {
+ /* The new chunk will be first on the freelist. */
+ state->freelist[region] = pending;
+
+ /* See if we can coalesce. */
+      if (_dlpm_chunk_adjacent (pending, current))
+ {
+ chunk new_next = _dlpm_chunk_next (current);
+ size_t new_size = (_dlpm_chunk_size (pending)
+ + _dlpm_chunk_size (current));
+ _dlpm_chunk_make (pending, new_next, new_size);
+ }
+ else
+ _dlpm_chunk_set_next (pending, current);
+ return;
+ }
+
+ while (true)
+ {
+ chunk next = _dlpm_chunk_next (current);
+      if (_dlpm_chunk_adjacent (current, pending))
+ {
+ /* We can coalesce. See if this completely fills a gap. */
+          if (_dlpm_chunk_adjacent (pending, next))
+ {
+ /* Merge three chunks. */
+ chunk new_next = _dlpm_chunk_next (next);
+ size_t new_size = (_dlpm_chunk_size (current)
+ + _dlpm_chunk_size (pending)
+ + _dlpm_chunk_size (next));
+ /* The address of the current chunk does not change, so
+ the next pointer leading to it remains valid. */
+ _dlpm_chunk_make (current, new_next, new_size);
+ }
+ else
+ {
+ /* Merge two chunks. */
+ size_t new_size = (_dlpm_chunk_size (current)
+ + _dlpm_chunk_size (pending));
+ /* The current chunk pointer remains unchanged. */
+ _dlpm_chunk_make (current, next, new_size);
+ }
+ break;
+ }
+ if (next == NULL)
+ {
+ /* New last chunk on freelist. */
+ _dlpm_chunk_set_next (current, pending);
+ break;
+ }
+ if (pending < next)
+ {
+ /* This is the right spot on the freelist. */
+ _dlpm_chunk_set_next (current, pending);
+
+ /* See if we can coalesce with the next chunk. */
+          if (_dlpm_chunk_adjacent (pending, next))
+ {
+ chunk new_next = _dlpm_chunk_next (next);
+ size_t new_size = (_dlpm_chunk_size (pending)
+ + _dlpm_chunk_size (next));
+ _dlpm_chunk_make (pending, new_next, new_size);
+ }
+ else
+ _dlpm_chunk_set_next (pending, next);
+ break;
+ }
+ current = next;
+ }
+}
+
+/* Returns the region index for the pointer. Terminates the process
+ if PTR is not on the heap. */
+static unsigned int
+_dlpm_find_region (struct dl_protmem_state *state, void *ptr)
+{
+ /* Find the region in which the pointer is located. */
+ size_t region_size = DL_PROTMEM_INITIAL_REGION_SIZE;
+ for (unsigned int i = 0; i < array_length (state->regions); ++i)
+ {
+ if (ptr >= state->regions[i] && ptr < state->regions[i] + region_size)
+ return i;
+ region_size *= 2;
+ }
+
+ _dl_fatal_printf ("\
+Fatal glibc error: Protected memory allocation not found\n");
+}
+
+void
+_dl_protmem_init (void)
+{
+ struct dl_protmem_state *state = _dl_protmem_state ();
+ state->regions[0] = state;
+  /* The part of the region after the allocator state (with the
+     embedded protected memory area) becomes the initial free chunk.  */
+ state->freelist[0] = (chunk) (state + 1);
+ void *initial_region_end = (void *) state + DL_PROTMEM_INITIAL_REGION_SIZE;
+ _dlpm_chunk_make (state->freelist[0], NULL,
+ initial_region_end - (void *) state->freelist[0]);
+ _dl_protmem_begin_count = 1;
+}
+
+void
+_dl_protmem_begin (void)
+{
+ if (_dl_protmem_begin_count++ != 0)
+ /* Already unprotected. */
+ return;
+
+ struct dl_protmem_state *state = _dl_protmem_state ();
+ size_t region_size = DL_PROTMEM_INITIAL_REGION_SIZE;
+ for (unsigned int i = 0; i < array_length (state->regions); ++i)
+ if (state->regions[i] != NULL)
+ {
+ if (__mprotect (state->regions[i], region_size,
+ PROT_READ | PROT_WRITE) != 0)
+          _dl_signal_error (ENOMEM, NULL, NULL,
+                            "cannot make protected memory writable");
+ region_size *= 2;
+ }
+}
+
+void
+_dl_protmem_end (void)
+{
+ if (--_dl_protmem_begin_count > 0)
+ return;
+
+ struct dl_protmem_state *state = _dl_protmem_state ();
+ size_t region_size = DL_PROTMEM_INITIAL_REGION_SIZE;
+ for (unsigned int i = 0; i < array_length (state->regions); ++i)
+ if (state->regions[i] != NULL)
+ /* Ignore errors here because we can continue running with
+ read-write memory, with reduced hardening. */
+ (void) __mprotect (state->regions[i], region_size, PROT_READ);
+}
+
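+/* See dl-protmem.h for the contract.  Allocation strategy: prefer an
+   exactly matching pending free chunk, then perform a best-fit
+   search of the freelists, then map a fresh region.  */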
+void *
+_dl_protmem_allocate (size_t requested_size)
+{
+ /* Round up the size to the next multiple of 8, to preserve chunk
+ alignment. */
+ {
+ size_t adjusted_size = roundup (requested_size, _dlpm_chunk_minimal_size);
+ if (adjusted_size < requested_size)
+ return NULL; /* Overflow. */
+ requested_size = adjusted_size;
+ }
+
+ struct dl_protmem_state *state = _dl_protmem_state ();
+
+ /* Try to find an exact match among the pending chunks. */
+ for (unsigned int i = 0; i < array_length (state->regions); ++i)
+ {
+ chunk pending = state->pending_free[i];
+ if (pending == NULL)
+ continue;
+ size_t pending_size = _dlpm_chunk_size (pending);
+ if (pending_size == requested_size)
+ {
+ state->pending_free[i] = NULL;
+ return pending;
+ }
+ }
+
+ /* Remove all pending allocations. */
+ for (unsigned int i = 0; i < array_length (state->regions); ++i)
+ if (state->pending_free[i] != NULL)
+ _dlpm_free_pending (state, i);
+
+ /* This points to the previous chunk of the best chunk found so far,
+ or the root of the freelist. This place needs to be updated to
+ remove the best chunk from the freelist. */
+ chunk best_previous_p = NULL;
+ size_t best_p_size = -1;
+
+ /* Best-fit search along the free lists. */
+ for (unsigned int i = 0; i < array_length (state->regions); ++i)
+ if (state->freelist[i] != NULL)
+ {
+        /* Treat the head pointer of the list as a pseudo-chunk whose
+           next pointer is the first chunk; its (absent) size field is
+           never accessed or updated below.  */
+ chunk last_p = (chunk) &state->freelist[i];
+ chunk p = state->freelist[i];
+ while (true)
+ {
+ size_t candidate_size = _dlpm_chunk_size (p);
+ chunk next_p = _dlpm_chunk_next (p);
+ if (candidate_size == requested_size)
+ {
+ /* Perfect fit. No further search needed.
+ Remove this chunk from the free list. */
+ _dlpm_chunk_set_next (last_p, next_p);
+ return p;
+ }
+ if (candidate_size > requested_size
+ && candidate_size < best_p_size)
+ /* Chunk with a better usable size. */
+ {
+ best_previous_p = last_p;
+ best_p_size = candidate_size;
+ }
+ if (next_p == NULL)
+ break;
+ last_p = p;
+ p = next_p;
+ }
+ }
+
+ if (best_previous_p == NULL)
+ {
+ /* No usable chunk found. Grow the heap. */
+ size_t region_size = DL_PROTMEM_INITIAL_REGION_SIZE;
+ for (unsigned int i = 0; i < array_length (state->regions); ++i)
+ {
+ if (state->regions[i] == NULL && region_size >= requested_size)
+ {
+ void *ptr = __mmap (NULL, region_size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ if (ptr == MAP_FAILED)
+ return NULL;
+ state->regions[i] = ptr;
+ if (region_size == requested_size)
+ /* Perfect fit: the entire region serves as the allocation. */
+ return ptr;
+
+ /* Create a free list with one entry for the entire region. */
+ state->freelist[i] = _dlpm_chunk_make (ptr, NULL, region_size);
+ best_previous_p = (chunk) &state->freelist[i];
+ best_p_size = region_size;
+
+ /* Chunk is split below. */
+ break;
+ }
+ region_size *= 2;
+ }
+
+ /* All regions have been exhausted. */
+ if (best_previous_p == NULL)
+ return NULL;
+ }
+
+ /* Split the chunk. */
+ chunk p = _dlpm_chunk_next (best_previous_p);
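+  /* Layout after the split (illustration):
+
+       p                remaining                  p_end
+       |<- allocation ->|<- remainder free chunk ->|
+
+     P is returned as the allocation; the remainder replaces the
+     original chunk on the freelist.  */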
+ void *p_end = (void *) p + best_p_size; /* Memory after this chunk. */
+ chunk p_next = _dlpm_chunk_next (p); /* Following chunk on freelist. */
+ void *remaining = (void *) p + requested_size; /* Place of the new chunk. */
+ /* Replace the chunk on the free list with its remainder. */
+ _dlpm_chunk_set_next (best_previous_p,
+ _dlpm_chunk_make (remaining,
+ p_next, p_end - remaining));
+ return p;
+}
+
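+/* See dl-protmem.h for the contract.  Deallocation is deferred: the
+   freed chunk is merged into the region's pending chunk if the two
+   are adjacent; otherwise any old pending chunk is flushed to the
+   freelist first.  */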
+void
+_dl_protmem_free (void *ptr, size_t requested_size)
+{
+ requested_size = roundup (requested_size, _dlpm_chunk_minimal_size);
+
+ struct dl_protmem_state *state = _dl_protmem_state ();
+ unsigned int region = _dlpm_find_region (state, ptr);
+
+ {
+ chunk pending = state->pending_free[region];
+ if (pending != NULL)
+ {
+        /* First try merging with the pending free chunk.  */
+        if (_dlpm_chunk_adjacent (pending, ptr))
+ {
+ /* Extend the existing pending chunk. The start address does
+ not change. */
+ _dlpm_chunk_make (pending, NULL,
+ _dlpm_chunk_size (pending) + requested_size);
+ return;
+ }
+        if (_dlpm_chunk_adjacent (ptr, pending))
+          {
+            /* Create a new chunk that has the existing chunk at the end.  */
+ state->pending_free[region]
+ = _dlpm_chunk_make (ptr, NULL,
+ requested_size + _dlpm_chunk_size (pending));
+ return;
+ }
+
+ /* Merging did not work out. Get rid of the old pending
+ allocation. */
+ _dlpm_free_pending (state, region);
+ }
+ }
+
+ /* No pending allocation at this point. Create new free chunk. */
+ state->pending_free[region] = _dlpm_chunk_make (ptr, NULL, requested_size);
+}
new file mode 100644
@@ -0,0 +1,93 @@
+/* Protected memory allocator for ld.so.
+ Copyright (C) 2025 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* The protected memory allocator manages the memory for the GLPM
+   variables (in shared builds) and for additional memory obtained
+   through _dl_protmem_allocate and _dl_protmem_free.
+
+   After a call to _dl_protmem_begin and until the matching call to
+   _dl_protmem_end, the GLPM variables and memory allocated using
+   _dl_protmem_allocate are writable.  _dl_protmem_begin and
+   _dl_protmem_end calls can be nested.  In this case, only the
+   outermost _dl_protmem_end call makes memory read-only.  */
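+
+/* A typical transaction looks like this (illustrative sketch; the
+   initialization step stands for arbitrary writes to protected
+   memory):
+
+     _dl_protmem_begin ();
+     struct link_map *l = _dl_protmem_allocate (size);
+     if (l != NULL)
+       initialize_link_map (l);
+     _dl_protmem_end ();
+
+   After _dl_protmem_end, the allocation is read-only until the next
+   _dl_protmem_begin call.  */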
+
+#ifndef DL_PROTMEM_H
+#define DL_PROTMEM_H
+
+#include <stddef.h>
+
+#ifdef SHARED
+/* Must be called after _dl_allocate_rtld_map and before any of the
+ functions below. Implies the first _dl_protmem_begin call. */
+void _dl_protmem_init (void) attribute_hidden;
+
+/* Frees memory allocated using _dl_protmem_allocate. The passed size
+ must be the same that was passed to _dl_protmem_allocate.
+ Protected memory must be writable when this function is called. */
+void _dl_protmem_free (void *ptr, size_t size) attribute_hidden;
+
+/* Allocate protected memory of SIZE bytes. Returns NULL on
+ allocation failure. Protected memory must be writable when this
+   function is called.  The allocation will be writable, and its
+   contents are unspecified (as with malloc).  */
+void *_dl_protmem_allocate (size_t size) attribute_hidden
+ __attribute_malloc__ __attribute_alloc_size__ ((1))
+ __attr_dealloc (_dl_protmem_free, 1);
+
+/* _dl_protmem_begin makes protected memory writable, and
+ _dl_protmem_end makes it read-only again. Calls to these functions
+ must be paired. Within this region, protected memory is writable.
+ See the initial description above.
+
+   Failure to make memory writable in _dl_protmem_begin is communicated
+ via an ld.so exception, typically resulting in a dlopen failure.
+ This can happen after a call to fork if memory overcommitment is
+ disabled. */
+void _dl_protmem_begin (void) attribute_hidden;
+void _dl_protmem_end (void) attribute_hidden;
+
+#else /* !SHARED */
+/* The protected memory allocator does not exist for static builds.
+   Use calloc/free directly.  */
+
+#include <stdlib.h>
+
+static inline void *
+_dl_protmem_allocate (size_t size)
+{
+ return calloc (size, 1);
+}
+
+static inline void
+_dl_protmem_free (void *ptr, size_t size)
+{
+ free (ptr);
+}
+
+static inline void
+_dl_protmem_begin (void)
+{
+}
+
+static inline void
+_dl_protmem_end (void)
+{
+}
+#endif /* !SHARED */
+
+#endif /* DL_PROTMEM_H */
@@ -17,6 +17,7 @@
<https://www.gnu.org/licenses/>. */
#include <dl-early_mmap.h>
+#include <dl-protmem-internal.h>
/* Return a pointer to the protected memory area, or NULL if
allocation fails. This function is called before self-relocation,
@@ -25,5 +26,11 @@
static inline __attribute__ ((always_inline)) struct rtld_protmem *
_dl_protmem_bootstrap (void)
{
- return _dl_early_mmap (sizeof (struct rtld_protmem));
+ /* The protected memory area is nested within the bootstrap
+ allocation. */
+ struct dl_protmem_state *ptr
+ = _dl_early_mmap (DL_PROTMEM_INITIAL_REGION_SIZE);
+ if (ptr == NULL)
+ return NULL;
+ return &ptr->protmem;
}
@@ -54,6 +54,7 @@
#include <dl-audit-check.h>
#include <dl-call_tls_init_tp.h>
#include <dl-protmem_bootstrap.h>
+#include <dl-protmem.h>
#include <assert.h>
@@ -463,6 +464,10 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
if (GLRO (dl_protmem) == NULL)
_dl_fatal_printf ("Fatal glibc error: Cannot allocate link map\n");
+ /* Set up the protected memory allocator, transferring the rtld link
+ map allocation in GLRO (dl_rtld_map). */
+ _dl_protmem_init ();
+
__rtld_malloc_init_stubs ();
/* Do not use an initializer for these members because it would
@@ -2353,6 +2358,11 @@ dl_main (const ElfW(Phdr) *phdr,
_dl_relocate_object might need to call `mprotect' for DT_TEXTREL. */
_dl_sysdep_start_cleanup ();
+  /* Most of the initialization work has happened by now, and it
+     should not be necessary to make the link maps read-write after
+     this point.  */
+ _dl_protmem_end ();
+
/* Notify the debugger all new objects are now ready to go. We must re-get
the address since by now the variable might be in another object. */
r = _dl_debug_update (LM_ID_BASE);
new file mode 100644
@@ -0,0 +1,350 @@
+/* Internal test for the protected memory allocator.
+ Copyright (C) 2025 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <array_length.h>
+#include <libc-diag.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <support/check.h>
+#include <support/xunistd.h>
+#include <sys/mman.h>
+
+static int do_test (void);
+#include <support/test-driver.c>
+
+/* Tracking allocated memory. Allocation granularity is assumed to be
+ 8 bytes. */
+
+/* Lowest level.  Covers 65536 * 32 * 8 bytes (24 bits of address space).  */
+struct level3
+{
+ uint32_t bits[1 << 16];
+};
+
+/* Mid level.  Covers 20 bits of address space.  */
+struct level2
+{
+ struct level3 *level2[1 << 20];
+};
+
+/* Top level. 20 bits of address space. */
+static struct level2 *level1[1 << 20];
+
+/* Byte address to index in level1. */
+static inline unsigned int
+level1_index (uintptr_t u)
+{
+#if UINTPTR_WIDTH > 44
+ return u >> 44;
+#else
+ return 0;
+#endif
+}
+
+/* Byte address to index in level1[N]->level2. */
+static inline unsigned int
+level2_index (uintptr_t u)
+{
+ return (u >> 24) & ((1 << 20) - 1);
+}
+
+/* Byte address to index in level1[N]->level2[M]->level3. */
+static inline unsigned int
+level3_index (uintptr_t u)
+{
+  unsigned int a = u >> 3;	/* Every 8th byte tracked.  */
+ return (a >> 5) & ((1 << 16) - 1);
+}
+
+/* Mask for the bit in level3_index. */
+static inline uint32_t
+level3_mask (uintptr_t u)
+{
+ return (uint32_t) 1U << ((u >> 3) & 31);
+}
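+
+/* Taken together (64-bit case): bits 63..44 of a byte address select
+   the level1 entry, bits 43..24 the level2 entry, bits 23..8 the
+   level3 word, and bits 7..3 the bit within that word.  */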
+
+/* Flip a bit from unset to set. Return false if the bit was already set. */
+static bool
+set_unset_bit_at (void *p)
+{
+ uintptr_t u = (uintptr_t) p;
+ struct level2 *l2 = level1[level1_index (u)];
+ if (l2 == NULL)
+ {
+ l2 = xmmap (NULL, sizeof (*l2), PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1);
+ level1[level1_index (u)] = l2;
+ }
+ struct level3 *l3 = l2->level2[level2_index (u)];
+ if (l3 == NULL)
+ {
+ l3 = xmmap (NULL, sizeof (*l3), PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1);
+ l2->level2[level2_index (u)] = l3;
+ }
+ unsigned int idx = level3_index (u);
+ uint32_t mask = level3_mask (u);
+ if (l3->bits[idx] & mask)
+ return false;
+ l3->bits[idx] |= mask;
+ return true;
+}
+
+/* Flip a bit from set to unset. Return false if the bit was already
+ cleared. */
+static bool
+clear_set_bit_at (void *p)
+{
+ uintptr_t u = (uintptr_t) p;
+ struct level2 *l2 = level1[level1_index (u)];
+ if (l2 == NULL)
+ return false;
+ struct level3 *l3 = l2->level2[level2_index (u)];
+ if (l3 == NULL)
+ return false;
+ unsigned int idx = level3_index (u);
+ uint32_t mask = level3_mask (u);
+ if (!(l3->bits[idx] & mask))
+ return false;
+ l3->bits[idx] &= ~mask;
+ return true;
+}
+
+/* Record an allocation in the bitmap. Errors if the covered bytes
+ are already allocated. */
+static void
+record_allocate (void *p, size_t size)
+{
+ TEST_VERIFY_EXIT (p != NULL);
+ TEST_VERIFY_EXIT (size > 0);
+ if (((uintptr_t) p & 7) != 0)
+ FAIL_EXIT1 ("unaligned allocation: %p of %zu bytes", p, size);
+ for (size_t i = 0; i < size; i += 8)
+ if (!set_unset_bit_at (p + i))
+ FAIL_EXIT1 ("already allocated byte %p in %zu-byte allocation at %p"
+ " (offset %zu)", p + i, size, p, i);
+}
+
+/* Record a deallocation in the bitmap.  Errors if the covered bytes
+   are not allocated.  */
+static void
+record_free (void *p, size_t size)
+{
+ TEST_VERIFY_EXIT (p != NULL);
+ TEST_VERIFY_EXIT (size > 0);
+ if (((uintptr_t) p & 7) != 0)
+ FAIL_EXIT1 ("unaligned free: %p of %zu bytes", p, size);
+ for (size_t i = 0; i < size; i += 8)
+ if (!clear_set_bit_at (p + i))
+ FAIL_EXIT1 ("already deallocated byte %p in %zu-byte deallocation at %p"
+ " (offset %zu)", p + i, size, p, i);
+}
+
+/* This hack results in a definition of struct rtld_global_ro and
+ related data structures. Do this after all the other header
+ inclusions, to minimize the impact. */
+#define SHARED
+#include <ldsodefs.h>
+
+/* Create our own version of GLRO (dl_protmem). */
+static struct rtld_protmem *dl_protmem;
+#undef GLRO
+#define GLRO(x) x
+
+#define SHARED
+#include <dl-protmem.h>
+#include <dl-protmem.c>
+#include <sysdeps/generic/dl-early_mmap.h> /* Avoid direct system call. */
+#include <dl-protmem_bootstrap.h>
+
+/* Return the allocation bit for an address. */
+static bool
+bit_at (void *p)
+{
+ uintptr_t u = (uintptr_t) p;
+ struct level2 *l2 = level1[level1_index (u)];
+ if (l2 == NULL)
+ return false;
+ struct level3 *l3 = l2->level2[level2_index (u)];
+ if (l3 == NULL)
+ return false;
+ unsigned int idx = level3_index (u);
+ uint32_t mask = level3_mask (u);
+ return l3->bits[idx] & mask;
+}
+
+/* Assert that SIZE bytes at P are unallocated. */
+static void
+check_free_chunk (void *p, size_t size)
+{
+ if (((uintptr_t) p & 7) != 0)
+ FAIL_EXIT1 ("unaligned free chunk: %p of %zu bytes", p, size);
+ for (size_t i = 0; i < size; i += 8)
+ if (bit_at (p + i))
+ FAIL_EXIT1 ("allocated byte %p in free chunk at %p (%zu bytes,"
+ " offset %zu)", p + i, p, size, i);
+}
+
+/* Dump statistics for the allocator regions (freelist length, maximum
+ free allocation size). If VERBOSE, log the entire freelist. */
+static void
+dump_regions (bool verbose)
+{
+ struct dl_protmem_state *state = _dl_protmem_state ();
+ for (unsigned int i = 0; i < array_length (state->regions); ++i)
+ {
+ if (verbose && state->regions[i] != NULL)
+ printf (" region %u at %p\n", i, state->regions[i]);
+
+ chunk pending = state->pending_free[i];
+ unsigned int count;
+ unsigned int max_size;
+ if (pending == NULL)
+ {
+ count = 0;
+ max_size = 0;
+ }
+ else
+ {
+ count = 1;
+ max_size = _dlpm_chunk_size (pending);
+ check_free_chunk (pending, max_size);
+ if (verbose)
+ printf (" pending free chunk %p, %u\n", pending, max_size);
+ }
+
+ uintptr_t last = 0;
+ for (chunk c = state->freelist[i]; c != NULL; c = _dlpm_chunk_next (c))
+ {
+ ++count;
+ size_t sz = _dlpm_chunk_size (c);
+ if (verbose)
+ printf (" free chunk %p, %zu\n", c, sz);
+ check_free_chunk (c, sz);
+ if (sz > max_size)
+ max_size = sz;
+ TEST_VERIFY ((uintptr_t) c > last);
+ last = (uintptr_t) c;
+ }
+
+ if (count > 0)
+ {
+ if (verbose)
+ printf (" ");
+ else
+ printf (" region %u at %p: ", i, state->regions[i]);
+ printf ("freelist length %u, maximum size %u\n", count, max_size);
+ }
+ }
+}
+
+
+static int
+do_test (void)
+{
+ dl_protmem = _dl_protmem_bootstrap ();
+ _dl_protmem_init ();
+
+  /* Perform random allocations and deallocations in a loop.  */
+ srand (1);
+ {
+ struct allocation
+ {
+ void *ptr;
+ size_t size;
+ } allocations[10007] = {};
+ for (unsigned int i = 0; i < 20 * 1000; ++i)
+ {
+ struct allocation *a
+ = &allocations[rand () % array_length (allocations)];
+ if (a->ptr == NULL)
+ {
+          a->size = 8 * ((rand () % 37) + 1);
+ a->ptr = _dl_protmem_allocate (a->size);
+ record_allocate (a->ptr, a->size);
+ /* Clobber the new allocation, in case some metadata still
+ references it. */
+ memset (a->ptr, 0xcc, a->size);
+ }
+ else
+ {
+ record_free (a->ptr, a->size);
+ _dl_protmem_free (a->ptr, a->size);
+ a->ptr = NULL;
+ a->size = 0;
+ }
+ }
+
+ puts ("info: after running test loop");
+ dump_regions (false);
+
+ for (unsigned int i = 0; i < array_length (allocations); ++i)
+ if (allocations[i].ptr != NULL)
+ {
+ record_free (allocations[i].ptr, allocations[i].size);
+ _dl_protmem_free (allocations[i].ptr, allocations[i].size);
+ }
+ puts ("info: after post-loop deallocations");
+ dump_regions (true);
+ }
+
+  /* Do a few larger allocations to show that coalescing works.  Note
+     that the first region contains allocator metadata, so its free
+     chunk is not an integral power of two.  */
+ {
+ void *ptrs[50];
+ for (unsigned int i = 0; i < array_length (ptrs); ++i)
+ {
+ ptrs[i] = _dl_protmem_allocate (65536);
+ record_allocate (ptrs[i], 65536);
+ }
+ puts ("info: after large allocations");
+ dump_regions (true);
+ for (unsigned int i = 0; i < array_length (ptrs); ++i)
+ {
+ record_free (ptrs[i], 65536);
+ _dl_protmem_free (ptrs[i], 65536);
+ }
+ puts ("info: after freeing allocations");
+ dump_regions (true);
+
+ ptrs[0] = _dl_protmem_allocate (8);
+ record_allocate (ptrs[0], 8);
+ puts ("info: after dummy allocation");
+ dump_regions (true);
+
+ record_free (ptrs[0], 8);
+#if __GNUC_PREREQ (11, 0)
+ /* Suppress invalid GCC warning with -O3 (GCC PR 110546):
+ error: '_dl_protmem_free' called on pointer returned from a
+ mismatched allocation function [-Werror=mismatched-dealloc]
+ note: returned from '_dl_protmem_allocate.constprop' */
+ DIAG_IGNORE_NEEDS_COMMENT (11, "-Wmismatched-dealloc");
+#endif
+ _dl_protmem_free (ptrs[0], 8);
+#if __GNUC_PREREQ (11, 0)
+ DIAG_POP_NEEDS_COMMENT;
+#endif
+ puts ("info: after dummy deallocation");
+ dump_regions (true);
+ }
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,42 @@
+/* Module with the checking function for read-only link maps.
+ Copyright (C) 2025 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <link.h>
+#include <stdio.h>
+#include <unistd.h>
+
+/* Export for use by the main program, to avoid copy relocations on
+ _r_debug. */
+struct r_debug_extended *const r_debug_extended_address
+ = (struct r_debug_extended *) &_r_debug;
+
+/* Fallback definition; the real definition is in the main program
+   and interposes this one.  */
+void
+check_relro_link_maps (const char *context)
+{
+ puts ("error: check_relro_link_maps not interposed");
+ _exit (1);
+}
+
+static void __attribute__ ((constructor))
+init (void)
+{
+ check_relro_link_maps ("ELF constructor (DSO)");
+}
+
+/* NB: destructors are not checked; memory is writable when they run.  */
new file mode 100644
@@ -0,0 +1,2 @@
+/* Same checking as the first module, but loaded via dlopen. */
+#include "tst-relro-linkmap-mod1.c"
new file mode 100644
@@ -0,0 +1,2 @@
+/* No checking possible because the check_relro_link_maps function
+   from the main program is inaccessible after dlmopen (the module is
+   loaded into a separate namespace).  */
new file mode 100644
@@ -0,0 +1,112 @@
+/* Verify that link maps are read-only most of the time.
+ Copyright (C) 2025 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <support/memprobe.h>
+#include <support/check.h>
+#include <support/xdlfcn.h>
+#include <support/xunistd.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <support/support.h>
+
+static int do_test (void);
+#include <support/test-driver.c>
+
+/* This hack results in a definition of struct rtld_global_ro and
+ related data structures. Do this after all the other header
+ inclusions, to minimize the impact. This only works from the main
+ program due to tests-internal. */
+#define SHARED
+#include <ldsodefs.h>
+
+/* Defined in tst-relro-linkmap-mod1.so. */
+extern struct r_debug_extended *const r_debug_extended_address;
+
+/* Check that link maps are read-only in all namespaces. */
+void
+check_relro_link_maps (const char *context)
+{
+ for (struct r_debug_extended *r = r_debug_extended_address;
+ r != NULL; r = r->r_next)
+ for (struct link_map *l = (struct link_map *) r->base.r_map;
+ l != NULL; l = l->l_next)
+ {
+ char *ctx;
+
+ ctx = xasprintf ("%s: link map for %s", context, l->l_name);
+ support_memprobe_readonly (ctx, l, sizeof (*l));
+ free (ctx);
+ if (false) /* Link map names are currently writable. */
+ {
+ ctx = xasprintf ("%s: link map name for %s", context, l->l_name);
+ support_memprobe_readonly (ctx, l->l_name, strlen (l->l_name) + 1);
+ free (ctx);
+ }
+ }
+}
+
+static void __attribute__ ((constructor))
+init (void)
+{
+ check_relro_link_maps ("ELF constructor (main)");
+}
+
+static void __attribute__ ((destructor))
+deinit (void)
+{
+ /* _dl_fini does not make link maps writable. */
+ check_relro_link_maps ("ELF destructor (main)");
+}
+
+static int
+do_test (void)
+{
+ check_relro_link_maps ("initial do_test");
+
+ /* Avoid copy relocations. Do this from the main program because we
+ need access to internal headers. */
+ {
+ struct rtld_global_ro *ro = xdlsym (RTLD_DEFAULT, "_rtld_global_ro");
+ check_relro_link_maps ("after _rtld_global_ro");
+ support_memprobe_readonly ("_rtld_global_ro", ro, sizeof (*ro));
+ support_memprobe_readonly ("GLPM", ro->_dl_protmem,
+ sizeof (*ro->_dl_protmem));
+ }
+ support_memprobe_readwrite ("_rtld_global",
+ xdlsym (RTLD_DEFAULT, "_rtld_global"),
+                              sizeof (struct rtld_global));
+ check_relro_link_maps ("after _rtld_global");
+
+ /* This is supposed to fail. */
+ TEST_VERIFY (dlopen ("tst-dlopenfailmod1.so", RTLD_LAZY) == NULL);
+ check_relro_link_maps ("after failed dlopen");
+
+ /* This should succeed. */
+ void *handle = xdlopen ("tst-relro-linkmap-mod2.so", RTLD_LAZY);
+ check_relro_link_maps ("after successful dlopen");
+ xdlclose (handle);
+ check_relro_link_maps ("after dlclose 1");
+
+ handle = xdlmopen (LM_ID_NEWLM, "tst-relro-linkmap-mod3.so", RTLD_LAZY);
+ check_relro_link_maps ("after dlmopen");
+ xdlclose (handle);
+ check_relro_link_maps ("after dlclose 2");
+
+ return 0;
+}
@@ -176,6 +176,9 @@ struct link_map
than one namespace. */
struct link_map *l_real;
+ /* Allocated size of this link map. */
+ size_t l_size;
+
/* Run-time writable fields. */
struct link_map_rw *l_rw;
@@ -509,7 +509,10 @@ extern struct rtld_global _rtld_global __rtld_global_attribute__;
#endif
#ifdef SHARED
-/* Implementation structure for the protected memory area. */
+/* Implementation structure for the protected memory area. In static
+ builds, the protected memory area is just regular (.data) memory,
+ as there is no RELRO support anyway. Some fields are only needed
+ for SHARED builds and are not included for static builds. */
struct rtld_protmem
{
/* Structure describing the dynamic linker itself. */
@@ -1022,6 +1025,9 @@ extern struct link_map *_dl_new_object (char *realname, const char *libname,
int mode, Lmid_t nsid)
attribute_hidden;
+/* Deallocates the specified link map (only the link map itself). */
+void _dl_free_object (struct link_map *) attribute_hidden;
+
/* Relocate the given object (if it hasn't already been).
SCOPE is passed to _dl_lookup_symbol in symbol lookups.
If RTLD_LAZY is set in RELOC-MODE, don't relocate its PLT. */