[2/2] aarch64: Support PLT with PAC
Commit Message
Add a pointer authentication code to the function pointers in the PLTGOT
if DT_AARCH64_PAC_PLT is set on an ELF binary. In such binaries the PLT
authenticates the pointer loaded from the GOT entry. This is only valid
in LP64 binaries and only useful when the PLTGOT is writable at runtime,
i.e. with lazy binding.
Note: TLS descriptors are not protected; the static linker should ensure
that GOT entries with TLSDESC relocations are read-only, i.e. they are
not lazily resolved.
2019-06-20 Szabolcs Nagy <szabolcs.nagy@arm.com>
* sysdeps/aarch64/dl-machine.h (set_jump_slot): Define.
(elf_machine_fixup_plt): Use set_jump_slot.
(elf_machine_rela): Likewise.
(elf_machine_lazy_rel): Likewise.
@@ -214,19 +214,36 @@ dl_platform_init (void)
static executable. */
init_cpu_features (&GLRO(dl_aarch64_cpu_features));
#endif
}
+/* Set *RELOC_ADDR such that a PLT entry using it jumps to VALUE, this is
+ needed because the PLT entry may apply transformations before the jump. */
+static inline ElfW(Addr)
+set_jump_slot (struct link_map *map, ElfW(Addr) *reloc_addr, ElfW(Addr) value)
+{
+#ifdef __LP64__
+ if (map->l_info[DT_AARCH64 (PAC_PLT)])
+ {
+ /* Add PAC if the PLT uses AUT to verify the loaded pointer. */
+ /* PACIA1716 signs x17 with the IA key using x16 as the modifier,
+ so pin the GOT entry address to x16 and the target to x17: the
+ PLT stub will authenticate with the same (address, key) pair.
+ HINT #0x8 is the NOP-space encoding, harmless on cores without
+ pointer authentication. */
+ register Elf64_Addr *x16 asm ("x16") = reloc_addr;
+ register Elf64_Addr x17 asm ("x17") = value;
+ asm ("hint #0x8 // pacia1716" : "+r"(x17) : "r"(x16));
+ value = x17;
+ }
+#endif
+ /* Store the (possibly signed) value and return it to the caller. */
+ return *reloc_addr = value;
+}
static inline ElfW(Addr)
elf_machine_fixup_plt (struct link_map *map, lookup_t t,
const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const ElfW(Rela) *reloc,
ElfW(Addr) *reloc_addr,
ElfW(Addr) value)
{
- return *reloc_addr = value;
+ /* Route the GOT update through set_jump_slot so the stored pointer
+ gets a PAC signature when DT_AARCH64_PAC_PLT is in effect. */
+ return set_jump_slot (map, reloc_addr, value);
}
/* Return the final value of a plt relocation. */
static inline ElfW(Addr)
elf_machine_plt_value (struct link_map *map,
@@ -288,13 +305,16 @@ elf_machine_rela (struct link_map *map, const ElfW(Rela) *reloc,
memcpy (reloc_addr_arg, (void *) value,
sym->st_size < refsym->st_size
? sym->st_size : refsym->st_size);
break;
+ case AARCH64_R(JUMP_SLOT):
+ set_jump_slot (map, reloc_addr, value + reloc->r_addend);
+ break;
+
case AARCH64_R(RELATIVE):
case AARCH64_R(GLOB_DAT):
- case AARCH64_R(JUMP_SLOT):
case AARCH64_R(ABS32):
#ifdef __LP64__
case AARCH64_R(ABS64):
#endif
*reloc_addr = value + reloc->r_addend;
@@ -361,11 +381,11 @@ elf_machine_rela (struct link_map *map, const ElfW(Rela) *reloc,
break;
case AARCH64_R(IRELATIVE):
value = map->l_addr + reloc->r_addend;
value = elf_ifunc_invoke (value);
- *reloc_addr = value;
+ set_jump_slot (map, reloc_addr, value);
break;
default:
_dl_reloc_bad_type (map, r_type, 0);
break;
@@ -396,11 +416,11 @@ elf_machine_lazy_rel (struct link_map *map,
if (__builtin_expect (r_type == AARCH64_R(JUMP_SLOT), 1))
{
if (map->l_mach.plt == 0)
{
/* Prelinking. */
- *reloc_addr += l_addr;
+ set_jump_slot (map, reloc_addr, *reloc_addr + l_addr);
return;
}
if (__glibc_unlikely (map->l_mach.variant_pcs))
{
@@ -423,11 +443,11 @@ elf_machine_lazy_rel (struct link_map *map,
skip_ifunc);
return;
}
}
- *reloc_addr = map->l_mach.plt;
+ set_jump_slot (map, reloc_addr, map->l_mach.plt);
}
else if (__builtin_expect (r_type == AARCH64_R(TLSDESC), 1))
{
const Elf_Symndx symndx = ELFW (R_SYM) (reloc->r_info);
const ElfW (Sym) *symtab = (const void *)D_PTR (map, l_info[DT_SYMTAB]);
@@ -448,11 +468,11 @@ elf_machine_lazy_rel (struct link_map *map,
else if (__glibc_unlikely (r_type == AARCH64_R(IRELATIVE)))
{
ElfW(Addr) value = map->l_addr + reloc->r_addend;
if (__glibc_likely (!skip_ifunc))
value = elf_ifunc_invoke (value);
- *reloc_addr = value;
+ set_jump_slot (map, reloc_addr, value);
}
else
_dl_reloc_bad_type (map, r_type, 1);
}