@@ -177,6 +177,10 @@ elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
} \
} while (0);
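+
+/* ELF_DYNAMIC_AFTER_RELOC (MAP, LAZY) is invoked after the dynamic
+ relocations for MAP have been processed; LAZY is nonzero when lazy
+ binding is in use. Architectures may define it to post-process the
+ relocated object; by default it expands to nothing. */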
+# ifndef ELF_DYNAMIC_AFTER_RELOC
+# define ELF_DYNAMIC_AFTER_RELOC(map, lazy)
+# endif
+
/* This can't just be an inline function because GCC is too dumb
to inline functions containing inlines themselves. */
# ifdef RTLD_BOOTSTRAP
@@ -192,6 +196,7 @@ elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
ELF_DYNAMIC_DO_RELR (map); \
ELF_DYNAMIC_DO_REL ((map), (scope), edr_lazy, skip_ifunc); \
ELF_DYNAMIC_DO_RELA ((map), (scope), edr_lazy, skip_ifunc); \
+ ELF_DYNAMIC_AFTER_RELOC ((map), (edr_lazy)); \
} while (0)
#endif
@@ -3639,6 +3639,11 @@ enum
/* x86-64 sh_type values. */
#define SHT_X86_64_UNWIND 0x70000001 /* Unwind information. */
+/* x86-64 d_tag values. */
+#define DT_X86_64_PLT (DT_LOPROC + 0) /* Address of PLT. */
+#define DT_X86_64_PLTSZ (DT_LOPROC + 1) /* Total size of PLT in bytes. */
+#define DT_X86_64_PLTENT (DT_LOPROC + 3) /* Size of one PLT entry. */
+#define DT_X86_64_NUM 4
/* AM33 relocations. */
#define R_MN10300_NONE 0 /* No reloc. */
@@ -187,6 +187,7 @@ DT_VALNUM
DT_VALRNGHI
DT_VALRNGLO
DT_VERSIONTAGNUM
+DT_X86_64_NUM
ELFCLASSNUM
ELFDATANUM
EM_NUM
@@ -439,6 +439,8 @@ class DtRISCV(Dt):
"""Supplemental DT_* constants for EM_RISCV."""
class DtSPARC(Dt):
"""Supplemental DT_* constants for EM_SPARC."""
+class DtX86_64(Dt):
+ """Supplemental DT_* constants for EM_X86_64."""
_dt_skip = '''
DT_ENCODING DT_PROCNUM
DT_ADDRRNGLO DT_ADDRRNGHI DT_ADDRNUM
@@ -451,6 +453,7 @@ DT_MIPS_NUM
DT_PPC_NUM
DT_PPC64_NUM
DT_SPARC_NUM
+DT_X86_64_NUM
'''.strip().split()
_register_elf_h(DtAARCH64, prefix='DT_AARCH64_', skip=_dt_skip, parent=Dt)
_register_elf_h(DtALPHA, prefix='DT_ALPHA_', skip=_dt_skip, parent=Dt)
@@ -461,6 +464,7 @@ _register_elf_h(DtPPC, prefix='DT_PPC_', skip=_dt_skip, parent=Dt)
_register_elf_h(DtPPC64, prefix='DT_PPC64_', skip=_dt_skip, parent=Dt)
_register_elf_h(DtRISCV, prefix='DT_RISCV_', skip=_dt_skip, parent=Dt)
_register_elf_h(DtSPARC, prefix='DT_SPARC_', skip=_dt_skip, parent=Dt)
+_register_elf_h(DtX86_64, prefix='DT_X86_64_', skip=_dt_skip, parent=Dt)
_register_elf_h(Dt, skip=_dt_skip, ranges=True)
del _dt_skip
@@ -32,10 +32,24 @@ enum dl_x86_cet_control
cet_permissive
};
+/* PLT rewrite control. */
+enum dl_x86_plt_rewrite_control
+{
+ /* No PLT rewrite. */
+ plt_rewrite_none,
+ /* PLT rewrite is enabled at run-time. */
+ plt_rewrite_enabled,
+ /* Rewrite PLT with JMP at run-time. */
+ plt_rewrite_jmp,
+ /* Rewrite PLT with JMPABS at run-time. */
+ plt_rewrite_jmpabs
+};
+
struct dl_x86_feature_control
{
enum dl_x86_cet_control ibt : 2;
enum dl_x86_cet_control shstk : 2;
+ enum dl_x86_plt_rewrite_control plt_rewrite : 2;
};
#endif /* cet-control.h */
@@ -27,6 +27,13 @@
extern void TUNABLE_CALLBACK (set_hwcaps) (tunable_val_t *)
attribute_hidden;
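+
+/* Enable PLT rewrite when the x86_plt_rewrite tunable is set to a
+ non-zero value; the rewrite variant (JMP or JMPABS) is selected
+ later in x86_64_dynamic_after_reloc once mprotect support has been
+ verified. */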
+static void
+TUNABLE_CALLBACK (set_plt_rewrite) (tunable_val_t *valp)
+{
+ if (valp->numval)
+ GL(dl_x86_feature_control).plt_rewrite = plt_rewrite_enabled;
+}
+
#ifdef __LP64__
static void
TUNABLE_CALLBACK (set_prefer_map_32bit_exec) (tunable_val_t *valp)
@@ -996,6 +1003,9 @@ no_cpuid:
TUNABLE_GET (hwcaps, tunable_val_t *, TUNABLE_CALLBACK (set_hwcaps));
+ TUNABLE_GET (x86_plt_rewrite, tunable_val_t *,
+ TUNABLE_CALLBACK (set_plt_rewrite));
+
#ifdef __LP64__
TUNABLE_GET (prefer_map_32bit_exec, tunable_val_t *,
TUNABLE_CALLBACK (set_prefer_map_32bit_exec));
@@ -67,6 +67,7 @@ PROCINFO_CLASS struct dl_x86_feature_control _dl_x86_feature_control
= {
.ibt = DEFAULT_DL_X86_CET_CONTROL,
.shstk = DEFAULT_DL_X86_CET_CONTROL,
+ .plt_rewrite = plt_rewrite_none,
}
# endif
# if !defined SHARED || defined PROCINFO_DECL
@@ -66,5 +66,10 @@ glibc {
x86_shared_cache_size {
type: SIZE_T
}
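+ # Enable rewriting of PLT entries into direct branches at run time
+ # when set to 1.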
+ x86_plt_rewrite {
+ type: INT_32
+ minval: 0
+ maxval: 1
+ }
}
}
new file mode 100644
@@ -0,0 +1,21 @@
+/* Configuration of lookup functions. x86-64 version.
+ Copyright (C) 2022 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* Number of extra dynamic section entries for this architecture. */
+#define DT_THISPROCNUM DT_X86_64_NUM
@@ -30,6 +30,9 @@
#include <dl-machine-rel.h>
#include <isa-level.h>
+/* Translate a processor-specific dynamic tag to its index in the l_info array. */
+#define DT_X86_64(x) (DT_X86_64_##x - DT_LOPROC + DT_NUM)
+
/* Return nonzero iff ELF header is compatible with the running host. */
static inline int __attribute__ ((unused))
elf_machine_matches_host (const ElfW(Ehdr) *ehdr)
@@ -304,8 +307,9 @@ and creates an unsatisfiable circular dependency.\n",
switch (r_type)
{
- case R_X86_64_GLOB_DAT:
case R_X86_64_JUMP_SLOT:
+ map->l_has_jump_slot_reloc = true;
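+ /* Fall through. */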
+ case R_X86_64_GLOB_DAT:
*reloc_addr = value;
break;
@@ -541,3 +545,233 @@ elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
}
#endif /* RESOLVE_MAP */
+
+#if !defined ELF_DYNAMIC_AFTER_RELOC && !defined RTLD_BOOTSTRAP \
+ && defined SHARED
+# define ELF_DYNAMIC_AFTER_RELOC(map, lazy) \
+ x86_64_dynamic_after_reloc (map, (lazy))
+
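+/* Return the name of the symbol referenced by the relocation RELOC in
+ MAP, for use in debug output. */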
+static const char *
+x86_64_reloc_symbol_name (struct link_map *map, const ElfW(Rela) *reloc)
+{
+ const ElfW(Sym) *const symtab
+ = (const void *) map->l_info[DT_SYMTAB]->d_un.d_ptr;
+ const ElfW(Sym) *const refsym = &symtab[ELFW (R_SYM) (reloc->r_info)];
+ const char *strtab = (const char *) map->l_info[DT_STRTAB]->d_un.d_ptr;
+ return strtab + refsym->st_name;
+}
+
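+/* Rewrite the R_X86_64_JUMP_SLOT PLT entries of MAP to branch directly
+ to the target recorded in the corresponding GOT entry. PLT_REWRITE is
+ the (writable) address at which the page-aligned PLT starting at
+ PLT_ALIGNED is mapped; when the PLT is rewritten in place the two are
+ equal. */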
+static void
+x86_64_rewrite_plt (struct link_map *map, ElfW(Addr) plt_rewrite,
+ ElfW(Addr) plt_aligned)
+{
+ ElfW(Addr) plt_rewrite_bias = plt_rewrite - plt_aligned;
+ ElfW(Addr) l_addr = map->l_addr;
+ ElfW(Addr) pltent = map->l_info[DT_X86_64 (PLTENT)]->d_un.d_val;
+ ElfW(Addr) start = map->l_info[DT_JMPREL]->d_un.d_ptr;
+ ElfW(Addr) size = map->l_info[DT_PLTRELSZ]->d_un.d_val;
+ const ElfW(Rela) *reloc = (const void *) start;
+ const ElfW(Rela) *reloc_end = (const void *) (start + size);
+
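+ /* Check whether IBT is active; if it is, the ENDBR64 at the start
+ of each PLT entry must be preserved. */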
+ unsigned int feature_1 = THREAD_GETMEM (THREAD_SELF,
+ header.feature_1);
+ bool ibt_enabled_p
+ = (feature_1 & GNU_PROPERTY_X86_FEATURE_1_IBT) != 0;
+
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
+ _dl_debug_printf ("\nchanging PLT for '%s' to direct branch\n",
+ DSO_FILENAME (map->l_name));
+
+ for (; reloc < reloc_end; reloc++)
+ if (ELFW(R_TYPE) (reloc->r_info) == R_X86_64_JUMP_SLOT)
+ {
+ /* Get the value from the GOT entry. */
+ ElfW(Addr) value = *(ElfW(Addr) *) (l_addr + reloc->r_offset);
+
+ /* Get the corresponding PLT entry from r_addend. */
+ ElfW(Addr) branch_start = l_addr + reloc->r_addend;
+ /* If IBT isn't enabled, the leading ENDBR64 isn't needed, so the
+ rewrite can start at the beginning of the PLT entry. */
+ if (!ibt_enabled_p)
+ branch_start = ALIGN_DOWN (branch_start, pltent);
+ /* Displacement from the end of a 5-byte JMP rel32 placed at
+ branch_start to the branch target. */
+ ElfW(Addr) disp = value - branch_start - 5;
+ ElfW(Addr) plt_end;
+ ElfW(Addr) pad;
+
+ branch_start += plt_rewrite_bias;
+ plt_end = (branch_start & -pltent) + pltent;
+
+ /* Update the PLT entry. */
+ if ((disp + 0x80000000ULL) <= 0xffffffffULL)
+ {
+ /* The displacement fits in a signed 32-bit immediate, so the
+ target can be reached with a direct branch; rewrite the PLT
+ entry with one. */
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_BINDINGS))
+ {
+ const char *sym_name = x86_64_reloc_symbol_name (map,
+ reloc);
+ _dl_debug_printf ("changing '%s' PLT for '%s' to "
+ "direct branch\n", sym_name,
+ DSO_FILENAME (map->l_name));
+ }
+
+ pad = branch_start + 5;
+
+ if (__glibc_unlikely (pad > plt_end))
+ {
+ if (__glibc_unlikely (GLRO(dl_debug_mask)
+ & DL_DEBUG_BINDINGS))
+ {
+ const char *sym_name
+ = x86_64_reloc_symbol_name (map, reloc);
+ _dl_debug_printf ("\ninvalid r_addend of "
+ "R_X86_64_JUMP_SLOT against '%s' "
+ "in '%s'\n", sym_name,
+ DSO_FILENAME (map->l_name));
+ }
+
+ continue;
+ }
+
+ /* Write out the direct branch: JMP rel32, opcode 0xe9 followed
+ by the 32-bit displacement. */
+ *(uint8_t *) branch_start = 0xe9;
+ *((uint32_t *) (branch_start + 1)) = disp;
+ }
+ else
+ {
+ if (GL(dl_x86_feature_control).plt_rewrite
+ != plt_rewrite_jmpabs)
+ continue;
+
+ pad = branch_start + 11;
+
+ if (pad > plt_end)
+ continue;
+
+ /* Rewrite the PLT entry with JMPABS. */
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_BINDINGS))
+ {
+ const char *sym_name = x86_64_reloc_symbol_name (map,
+ reloc);
+ _dl_debug_printf ("changing '%s' PLT for '%s' to JMPABS\n",
+ sym_name, DSO_FILENAME (map->l_name));
+ }
+
+ /* "jmpabs $target" with a 64-bit absolute address: REX2 prefix
+ 0xd5 0x0, opcode 0xa1, followed by the 8-byte target. */
+ *(uint8_t *) (branch_start + 0) = 0xd5;
+ *(uint8_t *) (branch_start + 1) = 0x0;
+ *(uint8_t *) (branch_start + 2) = 0xa1;
+ *(uint64_t *) (branch_start + 3) = value;
+ }
+
+ /* Fill the unused part of the PLT entry with INT3. */
+ for (; pad < plt_end; pad++)
+ *(uint8_t *) pad = 0xcc;
+ }
+}
+
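+/* Temporarily make the PLT of MAP writable, rewrite its entries, and
+ restore PROT_EXEC | PROT_READ protection afterwards. */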
+static inline void
+x86_64_rewrite_plt_in_place (struct link_map *map)
+{
+ /* Adjust DT_X86_64_PLT address and DT_X86_64_PLTSZ values. */
+ ElfW(Addr) plt = (map->l_info[DT_X86_64 (PLT)]->d_un.d_ptr
+ + map->l_addr);
+ size_t pagesize = GLRO(dl_pagesize);
+ ElfW(Addr) plt_aligned = ALIGN_DOWN (plt, pagesize);
+ size_t pltsz = (map->l_info[DT_X86_64 (PLTSZ)]->d_un.d_val
+ + plt - plt_aligned);
+
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
+ _dl_debug_printf ("\nchanging PLT in '%s' to writable\n",
+ DSO_FILENAME (map->l_name));
+
+ if (__glibc_unlikely (__mprotect ((void *) plt_aligned, pltsz,
+ PROT_WRITE | PROT_READ) < 0))
+ {
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
+ _dl_debug_printf ("\nfailed to change PLT in '%s' to writable\n",
+ DSO_FILENAME (map->l_name));
+ return;
+ }
+
+ x86_64_rewrite_plt (map, plt_aligned, plt_aligned);
+
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
+ _dl_debug_printf ("\nchanging PLT in '%s' back to read-only\n",
+ DSO_FILENAME (map->l_name));
+
+ if (__glibc_unlikely (__mprotect ((void *) plt_aligned, pltsz,
+ PROT_EXEC | PROT_READ) < 0))
+ _dl_signal_error (0, DSO_FILENAME (map->l_name), NULL,
+ "failed to change PLT back to read-only");
+}
+
+/* Rewrite PLT entries to direct branches, or to JMPABS on APX
+ processors, if possible. */
+
+static inline void
+x86_64_dynamic_after_reloc (struct link_map *map, int lazy)
+{
+ /* Ignore DT_X86_64_PLT if lazy binding is enabled. */
+ if (lazy)
+ return;
+
+ if (__glibc_likely (map->l_info[DT_X86_64 (PLT)] == NULL))
+ return;
+
+ /* Ignore DT_X86_64_PLT if there is no R_X86_64_JUMP_SLOT. */
+ if (!map->l_has_jump_slot_reloc)
+ return;
+
+ /* Ignore DT_X86_64_PLT on ld.so to avoid changing its own PLT. */
+ if (map == &GL(dl_rtld_map) || map->l_real == &GL(dl_rtld_map))
+ return;
+
+ /* Ignore DT_X86_64_PLT if
+ 1. DT_JMPREL isn't available or its value is 0.
+ 2. DT_PLTRELSZ is 0.
+ 3. DT_X86_64_PLTSZ isn't available or its value is 0.
+ 4. DT_X86_64_PLTENT isn't available or its value is smaller
+ than 16 bytes. */
+ if (map->l_info[DT_JMPREL] == NULL
+ || map->l_info[DT_JMPREL]->d_un.d_ptr == 0
+ || map->l_info[DT_PLTRELSZ]->d_un.d_val == 0
+ || map->l_info[DT_X86_64 (PLTSZ)] == NULL
+ || map->l_info[DT_X86_64 (PLTSZ)]->d_un.d_val == 0
+ || map->l_info[DT_X86_64 (PLTENT)] == NULL
+ || map->l_info[DT_X86_64 (PLTENT)]->d_un.d_val < 16)
+ return;
+
+ if (GL(dl_x86_feature_control).plt_rewrite == plt_rewrite_enabled)
+ {
+ /* PLT rewrite is enabled. Check on a scratch page whether a
+ writable mapping can be changed to PROT_EXEC | PROT_READ;
+ security policy may forbid this, and probing first avoids
+ touching the real PLT in that case. */
+ void *plt = __mmap (NULL, 4096, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS,
+ -1, 0);
+ if (__glibc_unlikely (plt == MAP_FAILED))
+ GL(dl_x86_feature_control).plt_rewrite = plt_rewrite_none;
+ else
+ {
+ *(int32_t *) plt = -1;
+
+ /* If the memory can be changed to PROT_EXEC | PROT_READ,
+ rewrite PLT. */
+ if (__mprotect (plt, 4096, PROT_EXEC | PROT_READ) == 0)
+ /* Use JMPABS on APX processors. */
+ GL(dl_x86_feature_control).plt_rewrite
+ = (CPU_FEATURE_PRESENT_P (__get_cpu_features (), APX_F)
+ ? plt_rewrite_jmpabs
+ : plt_rewrite_jmp);
+ else
+ GL(dl_x86_feature_control).plt_rewrite = plt_rewrite_none;
+
+ __munmap (plt, 4096);
+ }
+ }
+
+ /* Ignore DT_X86_64_PLT if PLT rewrite isn't enabled. */
+ if (GL(dl_x86_feature_control).plt_rewrite == plt_rewrite_none)
+ return;
+
+ x86_64_rewrite_plt_in_place (map);
+}
+#endif
new file mode 100644
@@ -0,0 +1,22 @@
+/* Additional fields in struct link_map. x86-64 version.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* True if the object has an R_X86_64_JUMP_SLOT relocation. */
+bool l_has_jump_slot_reloc;
+
+#include <sysdeps/x86/link_map.h>